Diffstat (limited to 'extras/mini-os/include')
 extras/mini-os/include/events.h                      |   9
 extras/mini-os/include/mm.h                          | 175
 extras/mini-os/include/sched.h                       |  29
 extras/mini-os/include/spinlock.h                    |  55
 extras/mini-os/include/time.h                        |   6
 extras/mini-os/include/x86/arch_mm.h                 | 209
 extras/mini-os/include/x86/arch_sched.h              |  58
 extras/mini-os/include/x86/arch_spinlock.h (renamed from extras/mini-os/include/x86/spinlock.h) | 38
 extras/mini-os/include/x86/os.h                      |   7
 extras/mini-os/include/x86/x86_32/hypercall-x86_32.h |   6
 extras/mini-os/include/x86/x86_64/hypercall-x86_64.h |   6
 11 files changed, 371 insertions(+), 227 deletions(-)
diff --git a/extras/mini-os/include/events.h b/extras/mini-os/include/events.h
index cdb6311845..45b47c1549 100644
--- a/extras/mini-os/include/events.h
+++ b/extras/mini-os/include/events.h
@@ -20,7 +20,7 @@
#define _EVENTS_H_
#include<traps.h>
-#include <xen/event_channel.h>
+#include<xen/event_channel.h>
typedef void (*evtchn_handler_t)(evtchn_port_t, struct pt_regs *, void *);
@@ -39,10 +39,9 @@ int evtchn_bind_interdomain(domid_t pal, evtchn_port_t remote_port,
static inline int notify_remote_via_evtchn(evtchn_port_t port)
{
- evtchn_op_t op;
- op.cmd = EVTCHNOP_send;
- op.u.send.port = port;
- return HYPERVISOR_event_channel_op(&op);
+ evtchn_send_t op;
+ op.port = port;
+ return HYPERVISOR_event_channel_op(EVTCHNOP_send, &op);
}
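
For reference, a caller-side sketch of the new (cmd, arg) event-channel ABI. This helper is hypothetical (not part of the patch); it assumes the usual evtchn_alloc_unbound_t and DOMID_SELF definitions from the Xen public headers:

static int alloc_unbound_port(domid_t remote_dom, evtchn_port_t *port)
{
    evtchn_alloc_unbound_t op;
    int rc;

    op.dom        = DOMID_SELF;      /* allocate in the local domain */
    op.remote_dom = remote_dom;      /* peer allowed to bind to it */
    rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);
    if (rc == 0)
        *port = op.port;             /* written back by Xen */
    return rc;
}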
diff --git a/extras/mini-os/include/mm.h b/extras/mini-os/include/mm.h
index 4c820e07e4..cd53a4bf25 100644
--- a/extras/mini-os/include/mm.h
+++ b/extras/mini-os/include/mm.h
@@ -29,182 +29,15 @@
#include <xen/arch-x86_32.h>
#elif defined(__x86_64__)
#include <xen/arch-x86_64.h>
+#elif defined(__ia64__)
+#include <xen/arch-ia64.h>
#else
#error "Unsupported architecture"
#endif
#include <lib.h>
+#include <arch_mm.h>
-#define L1_FRAME 1
-#define L2_FRAME 2
-#define L3_FRAME 3
-
-#define L1_PAGETABLE_SHIFT 12
-
-#if defined(__i386__)
-
-#if !defined(CONFIG_X86_PAE)
-
-#define L2_PAGETABLE_SHIFT 22
-
-#define L1_PAGETABLE_ENTRIES 1024
-#define L2_PAGETABLE_ENTRIES 1024
-
-#define PADDR_BITS 32
-#define PADDR_MASK (~0UL)
-
-#define NOT_L1_FRAMES 1
-#define PRIpte "08lx"
-typedef unsigned long pgentry_t;
-
-#else /* defined(CONFIG_X86_PAE) */
-
-#define L2_PAGETABLE_SHIFT 21
-#define L3_PAGETABLE_SHIFT 30
-
-#define L1_PAGETABLE_ENTRIES 512
-#define L2_PAGETABLE_ENTRIES 512
-#define L3_PAGETABLE_ENTRIES 4
-
-#define PADDR_BITS 44
-#define PADDR_MASK ((1ULL << PADDR_BITS)-1)
-
-#define L2_MASK ((1UL << L3_PAGETABLE_SHIFT) - 1)
-
-/*
- * If starting from virtual address greater than 0xc0000000,
- * this value will be 2 to account for final mid-level page
- * directory which is always mapped in at this location.
- */
-#define NOT_L1_FRAMES 3
-#define PRIpte "016llx"
-typedef uint64_t pgentry_t;
-
-#endif /* !defined(CONFIG_X86_PAE) */
-
-#elif defined(__x86_64__)
-
-#define L2_PAGETABLE_SHIFT 21
-#define L3_PAGETABLE_SHIFT 30
-#define L4_PAGETABLE_SHIFT 39
-
-#define L1_PAGETABLE_ENTRIES 512
-#define L2_PAGETABLE_ENTRIES 512
-#define L3_PAGETABLE_ENTRIES 512
-#define L4_PAGETABLE_ENTRIES 512
-
-/* These are page-table limitations. Current CPUs support only 40-bit phys. */
-#define PADDR_BITS 52
-#define VADDR_BITS 48
-#define PADDR_MASK ((1UL << PADDR_BITS)-1)
-#define VADDR_MASK ((1UL << VADDR_BITS)-1)
-
-#define L2_MASK ((1UL << L3_PAGETABLE_SHIFT) - 1)
-#define L3_MASK ((1UL << L4_PAGETABLE_SHIFT) - 1)
-
-#define NOT_L1_FRAMES 3
-#define PRIpte "016lx"
-typedef unsigned long pgentry_t;
-
-#endif
-
-#define L1_MASK ((1UL << L2_PAGETABLE_SHIFT) - 1)
-
-/* Given a virtual address, get an entry offset into a page table. */
-#define l1_table_offset(_a) \
- (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
-#define l2_table_offset(_a) \
- (((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
-#define l3_table_offset(_a) \
- (((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
-#endif
-#if defined(__x86_64__)
-#define l4_table_offset(_a) \
- (((_a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
-#endif
-
-#define _PAGE_PRESENT 0x001UL
-#define _PAGE_RW 0x002UL
-#define _PAGE_USER 0x004UL
-#define _PAGE_PWT 0x008UL
-#define _PAGE_PCD 0x010UL
-#define _PAGE_ACCESSED 0x020UL
-#define _PAGE_DIRTY 0x040UL
-#define _PAGE_PAT 0x080UL
-#define _PAGE_PSE 0x080UL
-#define _PAGE_GLOBAL 0x100UL
-
-#if defined(__i386__)
-#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
-#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY |_PAGE_USER)
-#if defined(CONFIG_X86_PAE)
-#define L3_PROT (_PAGE_PRESENT)
-#endif /* CONFIG_X86_PAE */
-#elif defined(__x86_64__)
-#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
-#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
-#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
-#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
-#endif /* __i386__ || __x86_64__ */
-
-#ifndef CONFIG_X86_PAE
-#define PAGE_SIZE (1UL << L1_PAGETABLE_SHIFT)
-#else
-#define PAGE_SIZE (1ULL << L1_PAGETABLE_SHIFT)
-#endif
-#define PAGE_SHIFT L1_PAGETABLE_SHIFT
-#define PAGE_MASK (~(PAGE_SIZE-1))
-
-#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> L1_PAGETABLE_SHIFT)
-#define PFN_DOWN(x) ((x) >> L1_PAGETABLE_SHIFT)
-#define PFN_PHYS(x) ((x) << L1_PAGETABLE_SHIFT)
-
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
-/* Definitions for machine and pseudophysical addresses. */
-#ifdef CONFIG_X86_PAE
-typedef unsigned long long paddr_t;
-typedef unsigned long long maddr_t;
-#else
-typedef unsigned long paddr_t;
-typedef unsigned long maddr_t;
-#endif
-
-extern unsigned long *phys_to_machine_mapping;
-extern char _text, _etext, _edata, _end;
-#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
-static __inline__ maddr_t phys_to_machine(paddr_t phys)
-{
- maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
- machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
- return machine;
-}
-
-#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
-static __inline__ paddr_t machine_to_phys(maddr_t machine)
-{
- paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
- phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
- return phys;
-}
-
-#define VIRT_START ((unsigned long)&_text)
-
-#define to_phys(x) ((unsigned long)(x)-VIRT_START)
-#define to_virt(x) ((void *)((unsigned long)(x)+VIRT_START))
-
-#define virt_to_pfn(_virt) (PFN_DOWN(to_phys(_virt)))
-#define virt_to_mfn(_virt) (pfn_to_mfn(virt_to_pfn(_virt)))
-#define mach_to_virt(_mach) (to_virt(machine_to_phys(_mach)))
-#define virt_to_mach(_virt) (phys_to_machine(to_phys(_virt)))
-#define mfn_to_virt(_mfn) (to_virt(mfn_to_pfn(_mfn) << PAGE_SHIFT))
-#define pfn_to_virt(_pfn) (to_virt((_pfn) << PAGE_SHIFT))
-
-/* Pagetable walking. */
-#define pte_to_mfn(_pte) (((_pte) & (PADDR_MASK&PAGE_MASK)) >> L1_PAGETABLE_SHIFT)
-#define pte_to_virt(_pte) to_virt(mfn_to_pfn(pte_to_mfn(_pte)) << PAGE_SHIFT)
void init_mm(void);
unsigned long alloc_pages(int order);
@@ -220,6 +53,8 @@ static __inline__ int get_order(unsigned long size)
return order;
}
+void arch_init_demand_mapping_area(unsigned long max_pfn);
+void arch_init_mm(unsigned long* start_pfn_p, unsigned long* max_pfn_p);
void *map_frames(unsigned long *f, unsigned long n);
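
As a usage note, a hypothetical helper combining the two declarations above: get_order() rounds a byte count up to the smallest power-of-two page count, and alloc_pages() (assumed here to return a virtual address, 0 on failure) hands back that many contiguous pages:

static void *alloc_contig(unsigned long bytes)
{
    int order = get_order(bytes);           /* smallest order with 2^order pages >= bytes */
    unsigned long va = alloc_pages(order);  /* assumed: VA on success, 0 on failure */
    return (void *)va;
}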
diff --git a/extras/mini-os/include/sched.h b/extras/mini-os/include/sched.h
index ce57be8a6c..f162062c45 100644
--- a/extras/mini-os/include/sched.h
+++ b/extras/mini-os/include/sched.h
@@ -2,39 +2,46 @@
#define __SCHED_H__
#include <list.h>
+#include <time.h>
+#include <arch_sched.h>
struct thread
{
char *name;
char *stack;
+#if !defined(__ia64__)
unsigned long sp; /* Stack pointer */
unsigned long ip; /* Instruction pointer */
+#else /* !defined(__ia64__) */
+ thread_regs_t regs;
+#endif /* !defined(__ia64__) */
struct list_head thread_list;
u32 flags;
+ s_time_t wakeup_time;
};
+extern struct thread *idle_thread;
+void idle_thread_fn(void *unused);
+#define RUNNABLE_FLAG 0x00000001
+
+#define is_runnable(_thread) (_thread->flags & RUNNABLE_FLAG)
+#define set_runnable(_thread) (_thread->flags |= RUNNABLE_FLAG)
+#define clear_runnable(_thread) (_thread->flags &= ~RUNNABLE_FLAG)
+
+#define switch_threads(prev, next) arch_switch_threads(prev, next)
+
void init_sched(void);
void run_idle_thread(void);
struct thread* create_thread(char *name, void (*function)(void *), void *data);
void schedule(void);
-static inline struct thread* get_current(void)
-{
- struct thread **current;
-#ifdef __i386__
- __asm__("andl %%esp,%0; ":"=r" (current) : "r" (~8191UL));
-#else
- __asm__("andq %%rsp,%0; ":"=r" (current) : "r" (~8191UL));
-#endif
- return *current;
-}
-
#define current get_current()
void wake(struct thread *thread);
void block(struct thread *thread);
+void sleep(u32 millisecs);
#endif /* __SCHED_H__ */
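
A sketch of the API after this change (the thread body and name are hypothetical): create_thread() hands a function to the scheduler, and the new sleep() parks the caller via the wakeup_time field instead of spinning:

static void ticker(void *unused)
{
    for (;;) {
        printk("tick\n");   /* mini-os console output */
        sleep(1000);        /* millisecs; sets wakeup_time and blocks */
    }
}

/* at init time: create_thread("ticker", ticker, NULL); */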
diff --git a/extras/mini-os/include/spinlock.h b/extras/mini-os/include/spinlock.h
new file mode 100644
index 0000000000..ecfe73627e
--- /dev/null
+++ b/extras/mini-os/include/spinlock.h
@@ -0,0 +1,55 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <lib.h>
+
+/*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ */
+
+typedef struct {
+ volatile unsigned int slock;
+} spinlock_t;
+
+
+#include "arch_spinlock.h"
+
+
+#define SPINLOCK_MAGIC 0xdead4ead
+
+#define SPIN_LOCK_UNLOCKED ARCH_SPIN_LOCK_UNLOCKED
+
+#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+
+/*
+ * Simple spin lock operations. There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+#define spin_is_locked(x) arch_spin_is_locked(x)
+
+#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
+
+
+#define _spin_trylock(lock) ({_raw_spin_trylock(lock) ? \
+ 1 : ({ 0;});})
+
+#define _spin_lock(lock) \
+do { \
+ _raw_spin_lock(lock); \
+} while(0)
+
+#define _spin_unlock(lock) \
+do { \
+ _raw_spin_unlock(lock); \
+} while (0)
+
+
+#define spin_lock(lock) _spin_lock(lock)
+#define spin_unlock(lock) _spin_unlock(lock)
+
+#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
+
+#endif
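
Usage of the new generic wrapper is unchanged from the old per-arch header; a minimal sketch (hypothetical lock and function names), relying only on the macros defined above:

static DEFINE_SPINLOCK(xmit_lock);

static void xmit_frame(void)
{
    spin_lock(&xmit_lock);
    /* critical section: only one CPU at a time */
    spin_unlock(&xmit_lock);
}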
diff --git a/extras/mini-os/include/time.h b/extras/mini-os/include/time.h
index aacf1d17e7..468a8d9173 100644
--- a/extras/mini-os/include/time.h
+++ b/extras/mini-os/include/time.h
@@ -7,8 +7,9 @@
* File: time.h
* Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
* Changes: Grzegorz Milos (gm281@cam.ac.uk)
+ * Robert Kaiser (kaiser@informatik.fh-wiesbaden.de)
*
- * Date: Jul 2003, changesJun 2005
+ * Date: Jul 2003, changes: Jun 2005, Sep 2006
*
* Environment: Xen Minimal OS
* Description: Time and timer functions
@@ -57,7 +58,8 @@ struct timespec {
void init_time(void);
s_time_t get_s_time(void);
s_time_t get_v_time(void);
+u64 monotonic_clock(void);
void gettimeofday(struct timeval *tv);
-void block_domain(u32 millisecs);
+void block_domain(s_time_t until);
#endif /* _TIME_H_ */
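
Note the semantic change: block_domain() now takes an absolute wake-up time, not a duration. A caller-side sketch, assuming mini-os's usual NOW() and MILLISECS() helpers from this header:

static void block_for_millisecs(u32 millisecs)
{
    /* convert the old duration-style argument to an absolute deadline */
    block_domain(NOW() + MILLISECS(millisecs));
}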
diff --git a/extras/mini-os/include/x86/arch_mm.h b/extras/mini-os/include/x86/arch_mm.h
new file mode 100644
index 0000000000..795ef070a7
--- /dev/null
+++ b/extras/mini-os/include/x86/arch_mm.h
@@ -0,0 +1,209 @@
+/* -*- Mode:C; c-basic-offset:4; tab-width:4 -*-
+ *
+ * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
+ * Copyright (c) 2005, Keir A Fraser
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _ARCH_MM_H_
+#define _ARCH_MM_H_
+
+#if defined(__i386__)
+#include <xen/arch-x86_32.h>
+#elif defined(__x86_64__)
+#include <xen/arch-x86_64.h>
+#else
+#error "Unsupported architecture"
+#endif
+
+#define L1_FRAME 1
+#define L2_FRAME 2
+#define L3_FRAME 3
+
+#define L1_PAGETABLE_SHIFT 12
+
+#if defined(__i386__)
+
+#if !defined(CONFIG_X86_PAE)
+
+#define L2_PAGETABLE_SHIFT 22
+
+#define L1_PAGETABLE_ENTRIES 1024
+#define L2_PAGETABLE_ENTRIES 1024
+
+#define PADDR_BITS 32
+#define PADDR_MASK (~0UL)
+
+#define NOT_L1_FRAMES 1
+#define PRIpte "08lx"
+typedef unsigned long pgentry_t;
+
+#else /* defined(CONFIG_X86_PAE) */
+
+#define L2_PAGETABLE_SHIFT 21
+#define L3_PAGETABLE_SHIFT 30
+
+#define L1_PAGETABLE_ENTRIES 512
+#define L2_PAGETABLE_ENTRIES 512
+#define L3_PAGETABLE_ENTRIES 4
+
+#define PADDR_BITS 44
+#define PADDR_MASK ((1ULL << PADDR_BITS)-1)
+
+#define L2_MASK ((1UL << L3_PAGETABLE_SHIFT) - 1)
+
+/*
+ * If starting from virtual address greater than 0xc0000000,
+ * this value will be 2 to account for final mid-level page
+ * directory which is always mapped in at this location.
+ */
+#define NOT_L1_FRAMES 3
+#define PRIpte "016llx"
+typedef uint64_t pgentry_t;
+
+#endif /* !defined(CONFIG_X86_PAE) */
+
+#elif defined(__x86_64__)
+
+#define L2_PAGETABLE_SHIFT 21
+#define L3_PAGETABLE_SHIFT 30
+#define L4_PAGETABLE_SHIFT 39
+
+#define L1_PAGETABLE_ENTRIES 512
+#define L2_PAGETABLE_ENTRIES 512
+#define L3_PAGETABLE_ENTRIES 512
+#define L4_PAGETABLE_ENTRIES 512
+
+/* These are page-table limitations. Current CPUs support only 40-bit phys. */
+#define PADDR_BITS 52
+#define VADDR_BITS 48
+#define PADDR_MASK ((1UL << PADDR_BITS)-1)
+#define VADDR_MASK ((1UL << VADDR_BITS)-1)
+
+#define L2_MASK ((1UL << L3_PAGETABLE_SHIFT) - 1)
+#define L3_MASK ((1UL << L4_PAGETABLE_SHIFT) - 1)
+
+#define NOT_L1_FRAMES 3
+#define PRIpte "016lx"
+typedef unsigned long pgentry_t;
+
+#endif
+
+#define L1_MASK ((1UL << L2_PAGETABLE_SHIFT) - 1)
+
+/* Given a virtual address, get an entry offset into a page table. */
+#define l1_table_offset(_a) \
+ (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
+#define l2_table_offset(_a) \
+ (((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
+#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
+#define l3_table_offset(_a) \
+ (((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
+#endif
+#if defined(__x86_64__)
+#define l4_table_offset(_a) \
+ (((_a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
+#endif
+
+#define _PAGE_PRESENT 0x001UL
+#define _PAGE_RW 0x002UL
+#define _PAGE_USER 0x004UL
+#define _PAGE_PWT 0x008UL
+#define _PAGE_PCD 0x010UL
+#define _PAGE_ACCESSED 0x020UL
+#define _PAGE_DIRTY 0x040UL
+#define _PAGE_PAT 0x080UL
+#define _PAGE_PSE 0x080UL
+#define _PAGE_GLOBAL 0x100UL
+
+#if defined(__i386__)
+#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
+#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY |_PAGE_USER)
+#if defined(CONFIG_X86_PAE)
+#define L3_PROT (_PAGE_PRESENT)
+#endif /* CONFIG_X86_PAE */
+#elif defined(__x86_64__)
+#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
+#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+#endif /* __i386__ || __x86_64__ */
+
+#ifndef CONFIG_X86_PAE
+#define PAGE_SIZE (1UL << L1_PAGETABLE_SHIFT)
+#else
+#define PAGE_SIZE (1ULL << L1_PAGETABLE_SHIFT)
+#endif
+#define PAGE_SHIFT L1_PAGETABLE_SHIFT
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> L1_PAGETABLE_SHIFT)
+#define PFN_DOWN(x) ((x) >> L1_PAGETABLE_SHIFT)
+#define PFN_PHYS(x) ((x) << L1_PAGETABLE_SHIFT)
+#define PHYS_PFN(x) ((x) >> L1_PAGETABLE_SHIFT)
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/* Definitions for machine and pseudophysical addresses. */
+#ifdef CONFIG_X86_PAE
+typedef unsigned long long paddr_t;
+typedef unsigned long long maddr_t;
+#else
+typedef unsigned long paddr_t;
+typedef unsigned long maddr_t;
+#endif
+
+extern unsigned long *phys_to_machine_mapping;
+extern char _text, _etext, _edata, _end;
+#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
+static __inline__ maddr_t phys_to_machine(paddr_t phys)
+{
+ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
+ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
+ return machine;
+}
+
+#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
+static __inline__ paddr_t machine_to_phys(maddr_t machine)
+{
+ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
+ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
+ return phys;
+}
+
+#define VIRT_START ((unsigned long)&_text)
+
+#define to_phys(x) ((unsigned long)(x)-VIRT_START)
+#define to_virt(x) ((void *)((unsigned long)(x)+VIRT_START))
+
+#define virt_to_pfn(_virt) (PFN_DOWN(to_phys(_virt)))
+#define virt_to_mfn(_virt) (pfn_to_mfn(virt_to_pfn(_virt)))
+#define mach_to_virt(_mach) (to_virt(machine_to_phys(_mach)))
+#define virt_to_mach(_virt) (phys_to_machine(to_phys(_virt)))
+#define mfn_to_virt(_mfn) (to_virt(mfn_to_pfn(_mfn) << PAGE_SHIFT))
+#define pfn_to_virt(_pfn) (to_virt((_pfn) << PAGE_SHIFT))
+
+/* Pagetable walking. */
+#define pte_to_mfn(_pte) (((_pte) & (PADDR_MASK&PAGE_MASK)) >> L1_PAGETABLE_SHIFT)
+#define pte_to_virt(_pte) to_virt(mfn_to_pfn(pte_to_mfn(_pte)) << PAGE_SHIFT)
+
+
+#endif /* _ARCH_MM_H_ */
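
An illustrative (hypothetical) pair of helpers using only the macros defined above: one maps a kernel virtual address to the machine address the hypervisor expects, the other walks a PTE back to a virtual address:

static maddr_t buffer_machine_addr(void *buf)
{
    return virt_to_mach(buf);   /* VA -> pseudo-physical -> machine */
}

static void *pte_target(pgentry_t pte)
{
    return pte_to_virt(pte);    /* mfn in PTE -> pfn -> VA */
}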
diff --git a/extras/mini-os/include/x86/arch_sched.h b/extras/mini-os/include/x86/arch_sched.h
new file mode 100644
index 0000000000..e02dbd05a5
--- /dev/null
+++ b/extras/mini-os/include/x86/arch_sched.h
@@ -0,0 +1,58 @@
+
+#ifndef __ARCH_SCHED_H__
+#define __ARCH_SCHED_H__
+
+
+static inline struct thread* get_current(void)
+{
+ struct thread **current;
+#ifdef __i386__
+ __asm__("andl %%esp,%0; ":"=r" (current) : "r" (~8191UL));
+#else
+ __asm__("andq %%rsp,%0; ":"=r" (current) : "r" (~8191UL));
+#endif
+ return *current;
+}
+
+#ifdef __i386__
+#define arch_switch_threads(prev, next) do { \
+ unsigned long esi,edi; \
+ __asm__ __volatile__("pushfl\n\t" \
+ "pushl %%ebp\n\t" \
+ "movl %%esp,%0\n\t" /* save ESP */ \
+ "movl %4,%%esp\n\t" /* restore ESP */ \
+ "movl $1f,%1\n\t" /* save EIP */ \
+ "pushl %5\n\t" /* restore EIP */ \
+ "ret\n\t" \
+ "1:\t" \
+ "popl %%ebp\n\t" \
+ "popfl" \
+ :"=m" (prev->sp),"=m" (prev->ip), \
+ "=S" (esi),"=D" (edi) \
+ :"m" (next->sp),"m" (next->ip), \
+ "2" (prev), "d" (next)); \
+} while (0)
+#elif __x86_64__
+#define arch_switch_threads(prev, next) do { \
+ unsigned long rsi,rdi; \
+ __asm__ __volatile__("pushfq\n\t" \
+ "pushq %%rbp\n\t" \
+ "movq %%rsp,%0\n\t" /* save RSP */ \
+ "movq %4,%%rsp\n\t" /* restore RSP */ \
+ "movq $1f,%1\n\t" /* save RIP */ \
+ "pushq %5\n\t" /* restore RIP */ \
+ "ret\n\t" \
+ "1:\t" \
+ "popq %%rbp\n\t" \
+ "popfq" \
+ :"=m" (prev->sp),"=m" (prev->ip), \
+ "=S" (rsi),"=D" (rdi) \
+ :"m" (next->sp),"m" (next->ip), \
+ "2" (prev), "d" (next)); \
+} while (0)
+#endif
+
+
+
+
+#endif /* __ARCH_SCHED_H__ */
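
get_current() works because each thread's stack is assumed to be an 8 KiB-aligned, 8 KiB block whose first word points back at its struct thread; masking the stack pointer with ~8191 recovers that word. A sketch of that layout assumption (hypothetical setup code, not from the patch):

static void place_thread_on_stack(struct thread *t, char *stack /* 8 KiB-aligned */)
{
    /* the word get_current() dereferences after masking the stack pointer */
    *((struct thread **)stack) = t;
    /* the stack pointer itself starts at stack + 8192 and grows down */
}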
diff --git a/extras/mini-os/include/x86/spinlock.h b/extras/mini-os/include/x86/arch_spinlock.h
index 4274cd2869..a181ed3c92 100644
--- a/extras/mini-os/include/x86/spinlock.h
+++ b/extras/mini-os/include/x86/arch_spinlock.h
@@ -1,21 +1,12 @@
-#ifndef __ASM_SPINLOCK_H
-#define __ASM_SPINLOCK_H
-#include <lib.h>
-
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-typedef struct {
- volatile unsigned int slock;
-} spinlock_t;
+#ifndef __ARCH_ASM_SPINLOCK_H
+#define __ARCH_ASM_SPINLOCK_H
-#define SPINLOCK_MAGIC 0xdead4ead
+#include <lib.h>
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
-#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+#define ARCH_SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
@@ -24,7 +15,7 @@ typedef struct {
* We make no fairness assumptions. They have a cost.
*/
-#define spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0)
+#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0)
#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
#define spin_lock_string \
@@ -99,23 +90,4 @@ static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
:"=m" (lock->slock) : "r" (flags) : "memory");
}
-#define _spin_trylock(lock) ({_raw_spin_trylock(lock) ? \
- 1 : ({ 0;});})
-
-#define _spin_lock(lock) \
-do { \
- _raw_spin_lock(lock); \
-} while(0)
-
-#define _spin_unlock(lock) \
-do { \
- _raw_spin_unlock(lock); \
-} while (0)
-
-
-#define spin_lock(lock) _spin_lock(lock)
-#define spin_unlock(lock) _spin_unlock(lock)
-
-#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
-
#endif
diff --git a/extras/mini-os/include/x86/os.h b/extras/mini-os/include/x86/os.h
index 2b6ed5512b..80f5586a49 100644
--- a/extras/mini-os/include/x86/os.h
+++ b/extras/mini-os/include/x86/os.h
@@ -19,6 +19,8 @@
#include <types.h>
#include <hypervisor.h>
+#define USED __attribute__ ((used))
+
extern void do_exit(void);
#define BUG do_exit
@@ -61,6 +63,11 @@ extern shared_info_t *HYPERVISOR_shared_info;
void trap_init(void);
+void arch_init(start_info_t *si);
+void arch_print_info(void);
+
+
+
/*
diff --git a/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h b/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h
index 6556c4f7e2..5f8b51f872 100644
--- a/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h
+++ b/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h
@@ -167,7 +167,7 @@ HYPERVISOR_fpu_taskswitch(
static inline int
HYPERVISOR_sched_op(
- int cmd, unsigned long arg)
+ int cmd, void *arg)
{
return _hypercall2(int, sched_op, cmd, arg);
}
@@ -238,9 +238,9 @@ HYPERVISOR_update_va_mapping(
static inline int
HYPERVISOR_event_channel_op(
- void *op)
+ int cmd, void *op)
{
- return _hypercall1(int, event_channel_op, op);
+ return _hypercall2(int, event_channel_op, cmd, op);
}
static inline int
diff --git a/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h b/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h
index 6a68a10b02..2d2904a218 100644
--- a/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h
+++ b/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h
@@ -171,7 +171,7 @@ HYPERVISOR_fpu_taskswitch(
static inline int
HYPERVISOR_sched_op(
- int cmd, unsigned long arg)
+ int cmd, void *arg)
{
return _hypercall2(int, sched_op, cmd, arg);
}
@@ -235,9 +235,9 @@ HYPERVISOR_update_va_mapping(
static inline int
HYPERVISOR_event_channel_op(
- void *op)
+ int cmd, void *op)
{
- return _hypercall1(int, event_channel_op, op);
+ return _hypercall2(int, event_channel_op, cmd, op);
}
static inline int
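
With sched_op now taking a void * argument, commands can pass structured arguments directly. A hedged sketch using SCHED_shutdown and struct sched_shutdown from Xen's public xen/sched.h:

static int shutdown_self(unsigned int reason)
{
    struct sched_shutdown arg = { .reason = reason };  /* e.g. SHUTDOWN_poweroff */
    return HYPERVISOR_sched_op(SCHED_shutdown, &arg);
}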