Diffstat (limited to 'xenolinux-2.4.23-sparse/include/asm-xeno')
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/bugs.h              53
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/desc.h              41
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/fixmap.h           101
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/highmem.h            2
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/hw_irq.h            61
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/hypervisor.h       410
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/irq.h               31
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/keyboard.h          95
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/mmu_context.h       73
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/msr.h              138
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/multicall.h         84
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/page.h             173
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/pgalloc.h          274
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/pgtable-2level.h    71
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/pgtable.h          370
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/proc_cmd.h          28
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/processor.h        481
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/ptrace.h            63
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/segment.h           15
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/smp.h              102
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/suspend.h           25
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/system.h           400
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/vga.h               42
-rw-r--r--  xenolinux-2.4.23-sparse/include/asm-xeno/xeno_proc.h         13
24 files changed, 3146 insertions(+), 0 deletions(-)
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/bugs.h b/xenolinux-2.4.23-sparse/include/asm-xeno/bugs.h
new file mode 100644
index 0000000000..c46b6a0b15
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/bugs.h
@@ -0,0 +1,53 @@
+/*
+ * include/asm-i386/bugs.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ *
+ * Cyrix stuff, June 1998 by:
+ * - Rafael R. Reilova (moved everything from head.S),
+ * <rreilova@ececs.uc.edu>
+ * - Channing Corn (tests & fixes),
+ * - Andrew D. Balsa (code cleanup).
+ *
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ */
+
+/*
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Needs:
+ * void check_bugs(void);
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/msr.h>
+
+
+static void __init check_fpu(void)
+{
+ boot_cpu_data.fdiv_bug = 0;
+}
+
+static void __init check_hlt(void)
+{
+ boot_cpu_data.hlt_works_ok = 1;
+}
+
+static void __init check_bugs(void)
+{
+ extern void __init boot_init_fpu(void);
+
+ identify_cpu(&boot_cpu_data);
+ boot_init_fpu();
+#ifndef CONFIG_SMP
+ printk("CPU: ");
+ print_cpu_info(&boot_cpu_data);
+#endif
+ check_fpu();
+ check_hlt();
+ system_utsname.machine[1] = '0' +
+ (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
+}
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/desc.h b/xenolinux-2.4.23-sparse/include/asm-xeno/desc.h
new file mode 100644
index 0000000000..545b7f8256
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/desc.h
@@ -0,0 +1,41 @@
+#ifndef __ARCH_DESC_H
+#define __ARCH_DESC_H
+
+#include <asm/ldt.h>
+
+#ifndef __ASSEMBLY__
+
+struct desc_struct {
+ unsigned long a,b;
+};
+
+struct Xgt_desc_struct {
+ unsigned short size;
+ unsigned long address __attribute__((packed));
+};
+
+extern struct desc_struct default_ldt[];
+
+static inline void clear_LDT(void)
+{
+ /*
+ * NB. We load the default_ldt for lcall7/27 handling on demand, as
+ * it slows down context switching. No one uses it anyway.
+ */
+ queue_set_ldt(0, 0);
+}
+
+static inline void load_LDT(struct mm_struct *mm)
+{
+ void *segments = mm->context.segments;
+ int count = 0;
+
+ if ( unlikely(segments != NULL) )
+ count = LDT_ENTRIES;
+
+ queue_set_ldt((unsigned long)segments, count);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ARCH_DESC_H */
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/fixmap.h b/xenolinux-2.4.23-sparse/include/asm-xeno/fixmap.h
new file mode 100644
index 0000000000..2441b01d4e
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/fixmap.h
@@ -0,0 +1,101 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <asm/apicdef.h>
+#include <asm/page.h>
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * Also this lets us do fail-safe vmalloc(): we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * These 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages (or larger if used with an increment
+ * higher than 1). Use set_fixmap(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+
+enum fixed_addresses {
+#ifdef CONFIG_HIGHMEM_XXX
+ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
+ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#endif
+ FIX_BLKRING_BASE,
+ FIX_NETRING0_BASE,
+ FIX_NETRING1_BASE,
+ FIX_NETRING2_BASE,
+ FIX_NETRING3_BASE,
+ FIX_SHARED_INFO,
+
+#ifdef CONFIG_VGA_CONSOLE
+#define NR_FIX_BTMAPS 32 /* 128KB For the Dom0 VGA Console A0000-C0000 */
+#else
+#define NR_FIX_BTMAPS 1 /* in case anyone wants it in future... */
+#endif
+ FIX_BTMAP_END,
+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
+ /* our bt_ioremap is permanent, unlike other architectures */
+
+ __end_of_permanent_fixed_addresses,
+ __end_of_fixed_addresses = __end_of_permanent_fixed_addresses
+};
+
+extern void __set_fixmap (enum fixed_addresses idx,
+ unsigned long phys, pgprot_t flags);
+
+#define set_fixmap(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL)
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+
+extern void clear_fixmap(enum fixed_addresses idx);
+
+/*
+ * used by vmalloc.c.
+ *
+ * Leave one empty page between vmalloc'ed areas and
+ * the start of the fixmap, and leave one page empty
+ * at the top of mem..
+ */
+#define FIXADDR_TOP (HYPERVISOR_VIRT_START - 2*PAGE_SIZE)
+#define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
+
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static inline unsigned long fix_to_virt(unsigned int idx)
+{
+ return __fix_to_virt(idx);
+}
+
+#endif
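[Illustrative aside, not part of the patch above: the fixmap slots are allocated downwards from FIXADDR_TOP, and fix_to_virt() turns an index into a compile-time-constant virtual address. A minimal sketch of binding the shared-info page to its slot; 'shared_info_phys' is a hypothetical machine address supplied at boot, not something defined in this header.]

    /* Sketch: map the hypervisor shared-info page at its fixed slot and
     * return the constant virtual address it lives at. */
    static unsigned long example_map_shared_info(unsigned long shared_info_phys)
    {
        set_fixmap(FIX_SHARED_INFO, shared_info_phys);  /* PAGE_KERNEL mapping */
        return fix_to_virt(FIX_SHARED_INFO);
    }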
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/highmem.h b/xenolinux-2.4.23-sparse/include/asm-xeno/highmem.h
new file mode 100644
index 0000000000..7e56b1b32d
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/highmem.h
@@ -0,0 +1,2 @@
+#error "Highmem unsupported!"
+
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/hw_irq.h b/xenolinux-2.4.23-sparse/include/asm-xeno/hw_irq.h
new file mode 100644
index 0000000000..d99d15bd24
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/hw_irq.h
@@ -0,0 +1,61 @@
+#ifndef _ASM_HW_IRQ_H
+#define _ASM_HW_IRQ_H
+
+/*
+ * linux/include/asm/hw_irq.h
+ *
+ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
+ */
+
+#include <linux/config.h>
+#include <linux/smp.h>
+#include <asm/atomic.h>
+#include <asm/irq.h>
+
+#define SYSCALL_VECTOR 0x80
+
+extern int irq_vector[NR_IRQS];
+
+extern atomic_t irq_err_count;
+extern atomic_t irq_mis_count;
+
+extern char _stext, _etext;
+
+extern unsigned long prof_cpu_mask;
+extern unsigned int * prof_buffer;
+extern unsigned long prof_len;
+extern unsigned long prof_shift;
+
+/*
+ * x86 profiling function, SMP safe. We might want to do this
+ * entirely in assembly?
+ */
+static inline void x86_do_profile (unsigned long eip)
+{
+ if (!prof_buffer)
+ return;
+
+ /*
+ * Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
+ * (default is all CPUs.)
+ */
+ if (!((1<<smp_processor_id()) & prof_cpu_mask))
+ return;
+
+ eip -= (unsigned long) &_stext;
+ eip >>= prof_shift;
+ /*
+ * Don't ignore out-of-bounds EIP values silently,
+ * put them into the last histogram slot, so if
+ * present, they will show up as a sharp peak.
+ */
+ if (eip > prof_len-1)
+ eip = prof_len-1;
+ atomic_inc((atomic_t *)&prof_buffer[eip]);
+}
+
+static inline void hw_resend_irq(struct hw_interrupt_type *h,
+ unsigned int i)
+{}
+
+#endif /* _ASM_HW_IRQ_H */
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/hypervisor.h b/xenolinux-2.4.23-sparse/include/asm-xeno/hypervisor.h
new file mode 100644
index 0000000000..0fbf40c951
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/hypervisor.h
@@ -0,0 +1,410 @@
+/******************************************************************************
+ * hypervisor.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002, K A Fraser
+ */
+
+#ifndef __HYPERVISOR_H__
+#define __HYPERVISOR_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/hypervisor-ifs/hypervisor-if.h>
+#include <asm/hypervisor-ifs/dom0_ops.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+
+/* arch/xeno/kernel/setup.c */
+union start_info_union
+{
+ start_info_t start_info;
+ char padding[512];
+};
+extern union start_info_union start_info_union;
+#define start_info (start_info_union.start_info)
+
+/* arch/xeno/kernel/hypervisor.c */
+void do_hypervisor_callback(struct pt_regs *regs);
+
+
+/* arch/xeno/mm/hypervisor.c */
+/*
+ * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
+ * be MACHINE addresses.
+ */
+
+extern unsigned int mmu_update_queue_idx;
+
+void queue_l1_entry_update(pte_t *ptr, unsigned long val);
+void queue_l2_entry_update(pmd_t *ptr, unsigned long val);
+void queue_pt_switch(unsigned long ptr);
+void queue_tlb_flush(void);
+void queue_invlpg(unsigned long ptr);
+void queue_pgd_pin(unsigned long ptr);
+void queue_pgd_unpin(unsigned long ptr);
+void queue_pte_pin(unsigned long ptr);
+void queue_pte_unpin(unsigned long ptr);
+void queue_set_ldt(unsigned long ptr, unsigned long bytes);
+#define MMU_UPDATE_DEBUG 0
+
+#define queue_unchecked_mmu_update(_p,_v) queue_l1_entry_update( \
+ (pte_t *)((unsigned long)(_p)|MMU_UNCHECKED_PT_UPDATE),(_v))
+
+#if MMU_UPDATE_DEBUG > 0
+typedef struct {
+ void *ptr;
+ unsigned long val, pteval;
+ void *ptep;
+ int line; char *file;
+} page_update_debug_t;
+extern page_update_debug_t update_debug_queue[];
+#define queue_l1_entry_update(_p,_v) ({ \
+ update_debug_queue[mmu_update_queue_idx].ptr = (_p); \
+ update_debug_queue[mmu_update_queue_idx].val = (_v); \
+ update_debug_queue[mmu_update_queue_idx].line = __LINE__; \
+ update_debug_queue[mmu_update_queue_idx].file = __FILE__; \
+ queue_l1_entry_update((_p),(_v)); \
+})
+#define queue_l2_entry_update(_p,_v) ({ \
+ update_debug_queue[mmu_update_queue_idx].ptr = (_p); \
+ update_debug_queue[mmu_update_queue_idx].val = (_v); \
+ update_debug_queue[mmu_update_queue_idx].line = __LINE__; \
+ update_debug_queue[mmu_update_queue_idx].file = __FILE__; \
+ queue_l2_entry_update((_p),(_v)); \
+})
+#endif
+
+#if MMU_UPDATE_DEBUG > 1
+#undef queue_l1_entry_update
+#undef queue_l2_entry_update
+#define queue_l1_entry_update(_p,_v) ({ \
+ update_debug_queue[mmu_update_queue_idx].ptr = (_p); \
+ update_debug_queue[mmu_update_queue_idx].val = (_v); \
+ update_debug_queue[mmu_update_queue_idx].line = __LINE__; \
+ update_debug_queue[mmu_update_queue_idx].file = __FILE__; \
+ printk("L1 %s %d: %08lx (%08lx -> %08lx)\n", __FILE__, __LINE__, \
+ (_p), pte_val(_p), \
+ (unsigned long)(_v)); \
+ queue_l1_entry_update((_p),(_v)); \
+})
+#define queue_l2_entry_update(_p,_v) ({ \
+ update_debug_queue[mmu_update_queue_idx].ptr = (_p); \
+ update_debug_queue[mmu_update_queue_idx].val = (_v); \
+ update_debug_queue[mmu_update_queue_idx].line = __LINE__; \
+ update_debug_queue[mmu_update_queue_idx].file = __FILE__; \
+ printk("L2 %s %d: %08lx (%08lx -> %08lx)\n", __FILE__, __LINE__, \
+ (_p), pmd_val(_p), \
+ (unsigned long)(_v)); \
+ queue_l2_entry_update((_p),(_v)); \
+})
+#define queue_pt_switch(_p) ({ \
+ printk("PTSWITCH %s %d: %08lx\n", __FILE__, __LINE__, (_p)); \
+ queue_pt_switch(_p); \
+})
+#define queue_tlb_flush() ({ \
+ printk("TLB FLUSH %s %d\n", __FILE__, __LINE__); \
+ queue_tlb_flush(); \
+})
+#define queue_invlpg(_p) ({ \
+ printk("INVLPG %s %d: %08lx\n", __FILE__, __LINE__, (_p)); \
+ queue_invlpg(_p); \
+})
+#define queue_pgd_pin(_p) ({ \
+ printk("PGD PIN %s %d: %08lx\n", __FILE__, __LINE__, (_p)); \
+ queue_pgd_pin(_p); \
+})
+#define queue_pgd_unpin(_p) ({ \
+ printk("PGD UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p)); \
+ queue_pgd_unpin(_p); \
+})
+#define queue_pte_pin(_p) ({ \
+ printk("PTE PIN %s %d: %08lx\n", __FILE__, __LINE__, (_p)); \
+ queue_pte_pin(_p); \
+})
+#define queue_pte_unpin(_p) ({ \
+ printk("PTE UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p)); \
+ queue_pte_unpin(_p); \
+})
+#define queue_set_ldt(_p,_l) ({ \
+ printk("SET LDT %s %d: %08lx %d\n", __FILE__, __LINE__, (_p), (_l)); \
+ queue_set_ldt((_p), (_l)); \
+})
+#endif
+
+void _flush_page_update_queue(void);
+static inline int flush_page_update_queue(void)
+{
+ unsigned int idx = mmu_update_queue_idx;
+ if ( idx != 0 ) _flush_page_update_queue();
+ return idx;
+}
+#define XENO_flush_page_update_queue() (_flush_page_update_queue())
+void MULTICALL_flush_page_update_queue(void);
+
+
+/*
+ * Assembler stubs for hyper-calls.
+ */
+
+static inline int HYPERVISOR_set_trap_table(trap_info_t *table)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_set_trap_table),
+ "b" (table) );
+
+ return ret;
+}
+
+
+static inline int HYPERVISOR_mmu_update(mmu_update_t *req, int count)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_mmu_update),
+ "b" (req), "c" (count) );
+
+ if ( unlikely(ret < 0) )
+ panic("Failed mmu update: %p, %d", req, count);
+
+ return ret;
+}
+
+
+static inline int HYPERVISOR_console_write(const char *str, int count)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_console_write),
+ "b" (str), "c" (count) );
+
+
+ return ret;
+}
+
+static inline int HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_set_gdt),
+ "b" (frame_list), "c" (entries) );
+
+
+ return ret;
+}
+
+static inline int HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_stack_switch),
+ "b" (ss), "c" (esp) : "memory" );
+
+ return ret;
+}
+
+static inline int HYPERVISOR_set_callbacks(
+ unsigned long event_selector, unsigned long event_address,
+ unsigned long failsafe_selector, unsigned long failsafe_address)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_set_callbacks),
+ "b" (event_selector), "c" (event_address),
+ "d" (failsafe_selector), "S" (failsafe_address) : "memory" );
+
+ return ret;
+}
+
+static inline int HYPERVISOR_net_io_op(netop_t *op)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_net_io_op),
+ "b" (op) );
+
+ return ret;
+}
+
+static inline int HYPERVISOR_fpu_taskswitch(void)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_fpu_taskswitch) );
+
+ return ret;
+}
+
+static inline int HYPERVISOR_yield(void)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
+ "b" (SCHEDOP_yield) );
+
+ return ret;
+}
+
+static inline int HYPERVISOR_exit(void)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
+ "b" (SCHEDOP_exit) );
+
+ return ret;
+}
+
+static inline int HYPERVISOR_stop(unsigned long srec)
+{
+ int ret;
+ /* NB. On suspend, control software expects a suspend record in %esi. */
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
+ "b" (SCHEDOP_stop), "S" (srec) : "memory" );
+
+ return ret;
+}
+
+static inline int HYPERVISOR_dom0_op(dom0_op_t *dom0_op)
+{
+ int ret;
+ dom0_op->interface_version = DOM0_INTERFACE_VERSION;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_dom0_op),
+ "b" (dom0_op) : "memory" );
+
+ return ret;
+}
+
+static inline int HYPERVISOR_network_op(void *network_op)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_network_op),
+ "b" (network_op) );
+
+ return ret;
+}
+
+static inline int HYPERVISOR_block_io_op(void * block_io_op)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_block_io_op),
+ "b" (block_io_op) );
+
+ return ret;
+}
+
+static inline int HYPERVISOR_set_debugreg(int reg, unsigned long value)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_set_debugreg),
+ "b" (reg), "c" (value) );
+
+ return ret;
+}
+
+static inline unsigned long HYPERVISOR_get_debugreg(int reg)
+{
+ unsigned long ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_get_debugreg),
+ "b" (reg) );
+
+ return ret;
+}
+
+static inline int HYPERVISOR_update_descriptor(
+ unsigned long pa, unsigned long word1, unsigned long word2)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_update_descriptor),
+ "b" (pa), "c" (word1), "d" (word2) );
+
+ return ret;
+}
+
+static inline int HYPERVISOR_set_fast_trap(int idx)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_set_fast_trap),
+ "b" (idx) );
+
+ return ret;
+}
+
+static inline int HYPERVISOR_dom_mem_op(void *dom_mem_op)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_dom_mem_op),
+ "b" (dom_mem_op) : "memory" );
+
+ return ret;
+}
+
+static inline int HYPERVISOR_multicall(void *call_list, int nr_calls)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_multicall),
+ "b" (call_list), "c" (nr_calls) : "memory" );
+
+ return ret;
+}
+
+static inline long HYPERVISOR_kbd_op(unsigned char op, unsigned char val)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_kbd_op),
+ "b" (op), "c" (val) );
+
+ return ret;
+}
+
+static inline int HYPERVISOR_update_va_mapping(
+ unsigned long page_nr, pte_t new_val, unsigned long flags)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_update_va_mapping),
+ "b" (page_nr), "c" ((new_val).pte_low), "d" (flags) );
+
+ if ( unlikely(ret < 0) )
+ panic("Failed update VA mapping: %08lx, %08lx, %08lx",
+ page_nr, (new_val).pte_low, flags);
+
+ return ret;
+}
+
+#endif /* __HYPERVISOR_H__ */
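[Illustrative aside, not part of the patch above: each stub loads the hypercall number into %eax and the arguments into %ebx/%ecx/%edx/%esi, then executes TRAP_INSTR to enter Xen. The queue_* routines defer page-table updates so that several can be validated by a single HYPERVISOR_mmu_update. A hedged sketch of the usual pattern; the pointers and values here are hypothetical.]

    /* Sketch: batch two L1 (pte) updates and push them to Xen in one trap.
     * Per the note above, the queued values are MACHINE addresses. */
    static void example_batched_pte_updates(pte_t *ptep0, unsigned long val0,
                                            pte_t *ptep1, unsigned long val1)
    {
        queue_l1_entry_update(ptep0, val0);
        queue_l1_entry_update(ptep1, val1);
        flush_page_update_queue();   /* one HYPERVISOR_mmu_update for both */
    }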
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/irq.h b/xenolinux-2.4.23-sparse/include/asm-xeno/irq.h
new file mode 100644
index 0000000000..3a4a3e394f
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/irq.h
@@ -0,0 +1,31 @@
+#ifndef _ASM_IRQ_H
+#define _ASM_IRQ_H
+
+/*
+ * linux/include/asm/irq.h
+ *
+ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
+ *
+ * IRQ/IPI changes taken from work by Thomas Radke
+ * <tomsoft@informatik.tu-chemnitz.de>
+ */
+
+#include <linux/config.h>
+#include <asm/hypervisor.h>
+#include <asm/ptrace.h>
+
+#define TIMER_IRQ _EVENT_TIMER
+
+#define NR_IRQS (sizeof(HYPERVISOR_shared_info->events) * 8)
+
+#define irq_cannonicalize(_irq) (_irq)
+
+extern void disable_irq(unsigned int);
+extern void disable_irq_nosync(unsigned int);
+extern void enable_irq(unsigned int);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
+#endif
+
+#endif /* _ASM_IRQ_H */
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/keyboard.h b/xenolinux-2.4.23-sparse/include/asm-xeno/keyboard.h
new file mode 100644
index 0000000000..6d6461dfb9
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/keyboard.h
@@ -0,0 +1,95 @@
+/* xenolinux/include/asm-xeno/keyboard.h */
+/* Portions copyright (c) 2003 James Scott, Intel Research Cambridge */
+/* Talks to the hypervisor to get PS/2 keyboard and mouse events, and to send keyboard and mouse commands */
+
+/* Based on:
+ * linux/include/asm-i386/keyboard.h
+ *
+ * Created 3 Nov 1996 by Geert Uytterhoeven
+ */
+
+#ifndef _XENO_KEYBOARD_H
+#define _XENO_KEYBOARD_H
+
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/kd.h>
+#include <linux/pm.h>
+#include <asm/io.h>
+
+extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
+extern int pckbd_getkeycode(unsigned int scancode);
+extern int pckbd_translate(unsigned char scancode, unsigned char *keycode,
+ char raw_mode);
+extern char pckbd_unexpected_up(unsigned char keycode);
+extern void pckbd_leds(unsigned char leds);
+extern void pckbd_init_hw(void);
+extern int pckbd_pm_resume(struct pm_dev *, pm_request_t, void *);
+
+extern pm_callback pm_kbd_request_override;
+extern unsigned char pckbd_sysrq_xlate[128];
+
+#define kbd_setkeycode pckbd_setkeycode
+#define kbd_getkeycode pckbd_getkeycode
+#define kbd_translate pckbd_translate
+#define kbd_unexpected_up pckbd_unexpected_up
+#define kbd_leds pckbd_leds
+#define kbd_init_hw pckbd_init_hw
+#define kbd_sysrq_xlate pckbd_sysrq_xlate
+
+#define SYSRQ_KEY 0x54
+
+
+/* THIS SECTION TALKS TO XEN TO DO PS2 SUPPORT */
+#include <asm/hypervisor-ifs/kbd.h>
+#include <asm/hypervisor-ifs/hypervisor-if.h>
+
+#define kbd_controller_present xen_kbd_controller_present
+
+static inline int xen_kbd_controller_present ()
+{
+ return start_info.flags & SIF_CONSOLE;
+}
+
+/* resource allocation */
+#define kbd_request_region() \
+ do { } while (0)
+#define kbd_request_irq(handler) \
+ request_irq(_EVENT_PS2, handler, 0, "ps/2", NULL)
+
+/* could implement these with command to xen to filter mouse stuff... */
+#define aux_request_irq(hand, dev_id) 0
+#define aux_free_irq(dev_id) do { } while(0)
+
+/* Some stone-age hardware needs delays after some operations. */
+#define kbd_pause() do { } while(0)
+
+static unsigned char kbd_current_scancode = 0;
+
+static unsigned char kbd_read_input(void)
+{
+ return kbd_current_scancode;
+}
+
+static unsigned char kbd_read_status(void)
+{
+ long res;
+ res = HYPERVISOR_kbd_op(KBD_OP_READ,0);
+ if ( res<0 )
+ {
+ kbd_current_scancode = 0;
+ return 0; /* error with our request - wrong domain? */
+ }
+ kbd_current_scancode = KBD_CODE_SCANCODE(res);
+ return KBD_CODE_STATUS(res);
+}
+
+
+#define kbd_write_output(val) HYPERVISOR_kbd_op(KBD_OP_WRITEOUTPUT, val);
+#define kbd_write_command(val) HYPERVISOR_kbd_op(KBD_OP_WRITECOMMAND, val);
+
+
+#endif /* __KERNEL__ */
+#endif /* _XENO_KEYBOARD_H */
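[Illustrative aside, not part of the patch above: kbd_read_status() fetches both the 8042-style status byte and the scancode with one HYPERVISOR_kbd_op, caching the scancode for the following kbd_read_input(). A sketch of the polling loop the generic PS/2 driver effectively performs through these hooks; the 0x01 'output buffer full' bit is assumed from the standard 8042 status layout, and handle_scancode() is the generic 2.4 keyboard-layer entry point from <linux/kbd_ll.h>.]

    /* Sketch: drain pending keyboard bytes via the hypervisor. */
    static void example_drain_kbd(void)
    {
        unsigned char status, scancode;

        while ( ((status = kbd_read_status()) & 0x01) != 0 )  /* 8042 OBF bit */
        {
            scancode = kbd_read_input();  /* cached from the same hypercall */
            handle_scancode(scancode, !(scancode & 0x80));
        }
    }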
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/mmu_context.h b/xenolinux-2.4.23-sparse/include/asm-xeno/mmu_context.h
new file mode 100644
index 0000000000..1eab441990
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/mmu_context.h
@@ -0,0 +1,73 @@
+#ifndef __I386_MMU_CONTEXT_H
+#define __I386_MMU_CONTEXT_H
+
+#include <linux/config.h>
+#include <asm/desc.h>
+#include <asm/atomic.h>
+#include <asm/pgalloc.h>
+
+/*
+ * possibly do the LDT unload here?
+ */
+#define destroy_context(mm) do { } while(0)
+#define init_new_context(tsk,mm) 0
+
+#ifdef CONFIG_SMP
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+ if(cpu_tlbstate[cpu].state == TLBSTATE_OK)
+ cpu_tlbstate[cpu].state = TLBSTATE_LAZY;
+}
+#else
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
+#endif
+
+extern pgd_t *cur_pgd;
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
+{
+ if (prev != next) {
+ /* stop flush ipis for the previous mm */
+ clear_bit(cpu, &prev->cpu_vm_mask);
+ /*
+ * Re-load LDT if necessary
+ */
+ if (prev->context.segments != next->context.segments)
+ load_LDT(next);
+#ifdef CONFIG_SMP
+ cpu_tlbstate[cpu].state = TLBSTATE_OK;
+ cpu_tlbstate[cpu].active_mm = next;
+#endif
+ set_bit(cpu, &next->cpu_vm_mask);
+ set_bit(cpu, &next->context.cpuvalid);
+ /* Re-load page tables */
+ cur_pgd = next->pgd;
+ queue_pt_switch(__pa(cur_pgd));
+ }
+#ifdef CONFIG_SMP
+ else {
+ cpu_tlbstate[cpu].state = TLBSTATE_OK;
+ if(cpu_tlbstate[cpu].active_mm != next)
+ out_of_line_bug();
+ if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
+ /* We were in lazy tlb mode and leave_mm disabled
+ * tlb flush IPI delivery. We must reload %cr3.
+ */
+ load_cr3(next->pgd);
+ }
+ if (!test_and_set_bit(cpu, &next->context.cpuvalid))
+ load_LDT(next);
+ }
+#endif
+}
+
+#define activate_mm(prev, next) \
+do { \
+ switch_mm((prev),(next),NULL,smp_processor_id()); \
+ flush_page_update_queue(); \
+} while ( 0 )
+
+#endif
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/msr.h b/xenolinux-2.4.23-sparse/include/asm-xeno/msr.h
new file mode 100644
index 0000000000..1a2c8765a8
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/msr.h
@@ -0,0 +1,138 @@
+#ifndef __ASM_MSR_H
+#define __ASM_MSR_H
+
+/*
+ * Access to model-specific registers (available on 586 and better only).
+ * Note: the rd* operations modify the parameters directly (without using
+ * pointer indirection); this allows gcc to optimize better.
+ */
+
+#define rdmsr(msr,val1,val2) \
+{ \
+ dom0_op_t op; \
+ op.cmd = DOM0_MSR; \
+ op.u.msr.write = 0; \
+ op.u.msr.msr = msr; \
+ op.u.msr.cpu_mask = (1 << current->processor); \
+ HYPERVISOR_dom0_op(&op); \
+ val1 = op.u.msr.out1; \
+ val2 = op.u.msr.out2; \
+}
+
+#define wrmsr(msr,val1,val2) \
+{ \
+ dom0_op_t op; \
+ op.cmd = DOM0_MSR; \
+ op.u.msr.write = 1; \
+ op.u.msr.cpu_mask = (1 << current->processor); \
+ op.u.msr.msr = msr; \
+ op.u.msr.in1 = val1; \
+ op.u.msr.in2 = val2; \
+ HYPERVISOR_dom0_op(&op); \
+}
+
+#define rdtsc(low,high) \
+ __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+
+#define rdtscl(low) \
+ __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
+
+#define rdtscll(val) \
+ __asm__ __volatile__("rdtsc" : "=A" (val))
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define rdpmc(counter,low,high) \
+ __asm__ __volatile__("rdpmc" \
+ : "=a" (low), "=d" (high) \
+ : "c" (counter))
+
+/* symbolic names for some interesting MSRs */
+/* Intel defined MSRs. */
+#define MSR_IA32_P5_MC_ADDR 0
+#define MSR_IA32_P5_MC_TYPE 1
+#define MSR_IA32_PLATFORM_ID 0x17
+#define MSR_IA32_EBL_CR_POWERON 0x2a
+
+#define MSR_IA32_APICBASE 0x1b
+#define MSR_IA32_APICBASE_BSP (1<<8)
+#define MSR_IA32_APICBASE_ENABLE (1<<11)
+#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+
+#define MSR_IA32_UCODE_WRITE 0x79
+#define MSR_IA32_UCODE_REV 0x8b
+
+#define MSR_IA32_BBL_CR_CTL 0x119
+
+#define MSR_IA32_MCG_CAP 0x179
+#define MSR_IA32_MCG_STATUS 0x17a
+#define MSR_IA32_MCG_CTL 0x17b
+
+#define MSR_IA32_THERM_CONTROL 0x19a
+#define MSR_IA32_THERM_INTERRUPT 0x19b
+#define MSR_IA32_THERM_STATUS 0x19c
+#define MSR_IA32_MISC_ENABLE 0x1a0
+
+#define MSR_IA32_DEBUGCTLMSR 0x1d9
+#define MSR_IA32_LASTBRANCHFROMIP 0x1db
+#define MSR_IA32_LASTBRANCHTOIP 0x1dc
+#define MSR_IA32_LASTINTFROMIP 0x1dd
+#define MSR_IA32_LASTINTTOIP 0x1de
+
+#define MSR_IA32_MC0_CTL 0x400
+#define MSR_IA32_MC0_STATUS 0x401
+#define MSR_IA32_MC0_ADDR 0x402
+#define MSR_IA32_MC0_MISC 0x403
+
+#define MSR_P6_PERFCTR0 0xc1
+#define MSR_P6_PERFCTR1 0xc2
+#define MSR_P6_EVNTSEL0 0x186
+#define MSR_P6_EVNTSEL1 0x187
+
+#define MSR_IA32_PERF_STATUS 0x198
+#define MSR_IA32_PERF_CTL 0x199
+
+/* AMD Defined MSRs */
+#define MSR_K6_EFER 0xC0000080
+#define MSR_K6_STAR 0xC0000081
+#define MSR_K6_WHCR 0xC0000082
+#define MSR_K6_UWCCR 0xC0000085
+#define MSR_K6_EPMR 0xC0000086
+#define MSR_K6_PSOR 0xC0000087
+#define MSR_K6_PFIR 0xC0000088
+
+#define MSR_K7_EVNTSEL0 0xC0010000
+#define MSR_K7_PERFCTR0 0xC0010004
+#define MSR_K7_HWCR 0xC0010015
+#define MSR_K7_CLK_CTL 0xC001001b
+#define MSR_K7_FID_VID_CTL 0xC0010041
+#define MSR_K7_VID_STATUS 0xC0010042
+
+/* Centaur-Hauls/IDT defined MSRs. */
+#define MSR_IDT_FCR1 0x107
+#define MSR_IDT_FCR2 0x108
+#define MSR_IDT_FCR3 0x109
+#define MSR_IDT_FCR4 0x10a
+
+#define MSR_IDT_MCR0 0x110
+#define MSR_IDT_MCR1 0x111
+#define MSR_IDT_MCR2 0x112
+#define MSR_IDT_MCR3 0x113
+#define MSR_IDT_MCR4 0x114
+#define MSR_IDT_MCR5 0x115
+#define MSR_IDT_MCR6 0x116
+#define MSR_IDT_MCR7 0x117
+#define MSR_IDT_MCR_CTRL 0x120
+
+/* VIA Cyrix defined MSRs*/
+#define MSR_VIA_FCR 0x1107
+#define MSR_VIA_LONGHAUL 0x110a
+#define MSR_VIA_BCR2 0x1147
+
+/* Transmeta defined MSRs */
+#define MSR_TMTA_LONGRUN_CTRL 0x80868010
+#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
+#define MSR_TMTA_LRTI_READOUT 0x80868018
+#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
+
+#endif /* __ASM_MSR_H */
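[Illustrative aside, not part of the patch above: unlike native Linux, rdmsr/wrmsr here cannot execute the privileged instructions directly; they marshal a DOM0_MSR request through HYPERVISOR_dom0_op, so they only work in a domain permitted to issue dom0 operations. A brief usage sketch with a hypothetical function name.]

    /* Sketch: read the APIC base MSR through the dom0_op-backed rdmsr macro. */
    static void example_read_apicbase(void)
    {
        unsigned long lo, hi;

        rdmsr(MSR_IA32_APICBASE, lo, hi);  /* expands to a DOM0_MSR hypercall */
        printk("APICBASE = %08lx%08lx\n", hi, lo);
    }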
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/multicall.h b/xenolinux-2.4.23-sparse/include/asm-xeno/multicall.h
new file mode 100644
index 0000000000..f0ea5c3a66
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/multicall.h
@@ -0,0 +1,84 @@
+/******************************************************************************
+ * multicall.h
+ */
+
+#ifndef __MULTICALL_H__
+#define __MULTICALL_H__
+
+#include <asm/hypervisor.h>
+
+extern multicall_entry_t multicall_list[];
+extern int nr_multicall_ents;
+
+static inline void queue_multicall0(unsigned long op)
+{
+ int i = nr_multicall_ents;
+ multicall_list[i].op = op;
+ nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall1(unsigned long op, unsigned long arg1)
+{
+ int i = nr_multicall_ents;
+ multicall_list[i].op = op;
+ multicall_list[i].args[0] = arg1;
+ nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall2(
+ unsigned long op, unsigned long arg1, unsigned long arg2)
+{
+ int i = nr_multicall_ents;
+ multicall_list[i].op = op;
+ multicall_list[i].args[0] = arg1;
+ multicall_list[i].args[1] = arg2;
+ nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall3(
+ unsigned long op, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3)
+{
+ int i = nr_multicall_ents;
+ multicall_list[i].op = op;
+ multicall_list[i].args[0] = arg1;
+ multicall_list[i].args[1] = arg2;
+ multicall_list[i].args[2] = arg3;
+ nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall4(
+ unsigned long op, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4)
+{
+ int i = nr_multicall_ents;
+ multicall_list[i].op = op;
+ multicall_list[i].args[0] = arg1;
+ multicall_list[i].args[1] = arg2;
+ multicall_list[i].args[2] = arg3;
+ multicall_list[i].args[3] = arg4;
+ nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall5(
+ unsigned long op, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5)
+{
+ int i = nr_multicall_ents;
+ multicall_list[i].op = op;
+ multicall_list[i].args[0] = arg1;
+ multicall_list[i].args[1] = arg2;
+ multicall_list[i].args[2] = arg3;
+ multicall_list[i].args[3] = arg4;
+ multicall_list[i].args[4] = arg5;
+ nr_multicall_ents = i+1;
+}
+
+static inline void execute_multicall_list(void)
+{
+ if ( unlikely(nr_multicall_ents == 0) ) return;
+ (void)HYPERVISOR_multicall(multicall_list, nr_multicall_ents);
+ nr_multicall_ents = 0;
+}
+
+#endif /* __MULTICALL_H__ */
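[Illustrative aside, not part of the patch above: the queue_multicallN() helpers only append entries to multicall_list; nothing reaches Xen until execute_multicall_list() issues a single HYPERVISOR_multicall. A hedged sketch of batching two operations, similar in spirit to what a context-switch path might do; the argument values are hypothetical.]

    /* Sketch: queue two hypercalls and issue them with one trap into Xen. */
    static void example_multicall_batch(unsigned long new_ss, unsigned long new_esp)
    {
        queue_multicall0(__HYPERVISOR_fpu_taskswitch);
        queue_multicall2(__HYPERVISOR_stack_switch, new_ss, new_esp);
        execute_multicall_list();  /* one HYPERVISOR_multicall for both entries */
    }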
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/page.h b/xenolinux-2.4.23-sparse/include/asm-xeno/page.h
new file mode 100644
index 0000000000..b7640a7d78
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/page.h
@@ -0,0 +1,173 @@
+#ifndef _I386_PAGE_H
+#define _I386_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+#include <asm/hypervisor-ifs/hypervisor-if.h>
+
+#ifdef CONFIG_X86_USE_3DNOW
+
+#include <asm/mmx.h>
+
+#define clear_page(page) mmx_clear_page((void *)(page))
+#define copy_page(to,from) mmx_copy_page(to,from)
+
+#else
+
+/*
+ * On older X86 processors it's not a win to use MMX here, it seems.
+ * Maybe the K6-III ?
+ */
+
+#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
+#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
+
+#endif
+
+#define clear_user_page(page, vaddr) clear_page(page)
+#define copy_user_page(to, from, vaddr) copy_page(to, from)
+
+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+extern unsigned long *phys_to_machine_mapping;
+#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
+#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
+static inline unsigned long phys_to_machine(unsigned long phys)
+{
+ unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
+ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
+ return machine;
+}
+static inline unsigned long machine_to_phys(unsigned long machine)
+{
+ unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
+ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
+ return phys;
+}
+
+/*
+ * These are used to make use of C type-checking..
+ */
+#if CONFIG_X86_PAE
+typedef struct { unsigned long pte_low, pte_high; } pte_t;
+typedef struct { unsigned long long pmd; } pmd_t;
+typedef struct { unsigned long long pgd; } pgd_t;
+#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+#else
+typedef struct { unsigned long pte_low; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+static inline unsigned long pte_val(pte_t x)
+{
+ unsigned long ret = x.pte_low;
+ if ( (ret & 1) ) ret = machine_to_phys(ret);
+ return ret;
+}
+#endif
+#define PTE_MASK PAGE_MASK
+
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+static inline unsigned long pmd_val(pmd_t x)
+{
+ unsigned long ret = x.pmd;
+ if ( (ret & 1) ) ret = machine_to_phys(ret);
+ return ret;
+}
+#define pgd_val(x) ({ BUG(); (unsigned long)0; })
+#define pgprot_val(x) ((x).pgprot)
+
+static inline pte_t __pte(unsigned long x)
+{
+ if ( (x & 1) ) x = phys_to_machine(x);
+ return ((pte_t) { (x) });
+}
+static inline pmd_t __pmd(unsigned long x)
+{
+ if ( (x & 1) ) x = phys_to_machine(x);
+ return ((pmd_t) { (x) });
+}
+#define __pgd(x) ({ BUG(); (pgd_t) { 0 }; })
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#endif /* !__ASSEMBLY__ */
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/*
+ * This handles the memory map.. We could make this a config
+ * option, but too many people screw it up, and too few need
+ * it.
+ *
+ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
+ * a virtual address space of one gigabyte, which limits the
+ * amount of physical memory you can use to about 950MB.
+ *
+ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
+ * and CONFIG_HIGHMEM64G options in the kernel configuration.
+ */
+
+#define __PAGE_OFFSET (0xC0000000)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Tell the user there is some problem. Beep too, so we can
+ * see^H^H^Hhear bugs in early bootup as well!
+ * The offending file and line are encoded after the "officially
+ * undefined" opcode for parsing in the trap handler.
+ */
+
+#if 1 /* Set to zero for a slightly smaller kernel */
+#define BUG() \
+ __asm__ __volatile__( "ud2\n" \
+ "\t.word %c0\n" \
+ "\t.long %c1\n" \
+ : : "i" (__LINE__), "i" (__FILE__))
+#else
+#define BUG() __asm__ __volatile__("ud2\n")
+#endif
+
+#define PAGE_BUG(page) do { \
+ BUG(); \
+} while (0)
+
+/* Pure 2^n version of get_order */
+static __inline__ int get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
+#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
+#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+/* VIRT <-> MACHINE conversion */
+#define virt_to_machine(_a) (phys_to_machine(__pa(_a)))
+#define machine_to_virt(_m) (__va(machine_to_phys(_m)))
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_PAGE_H */
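[Illustrative aside, not part of the patch above: the pfn_to_mfn/mfn_to_pfn tables let the guest keep its own contiguous pseudo-physical frame numbers while Xen hands it arbitrary machine frames; phys_to_machine() and machine_to_phys() just reattach the offset within the page. A small round-trip sketch; 'buf' is any directly-mapped kernel address.]

    /* Sketch: translate a kernel virtual address to a machine address and back. */
    static void example_addr_translation(void *buf)
    {
        unsigned long phys    = __pa(buf);              /* pseudo-physical */
        unsigned long machine = phys_to_machine(phys);  /* real machine address */

        if ( machine_to_phys(machine) != phys )  /* the mappings are inverses */
            BUG();
    }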
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/pgalloc.h b/xenolinux-2.4.23-sparse/include/asm-xeno/pgalloc.h
new file mode 100644
index 0000000000..9a90cb1b1d
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/pgalloc.h
@@ -0,0 +1,274 @@
+#ifndef _I386_PGALLOC_H
+#define _I386_PGALLOC_H
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+#include <asm/hypervisor.h>
+#include <linux/threads.h>
+
+/*
+ * Quick lists are aligned so that least significant bits of array pointer
+ * are all zero when list is empty, and all one when list is full.
+ */
+#define QUICKLIST_ENTRIES 256
+#define QUICKLIST_EMPTY(_l) !((unsigned long)(_l) & ((QUICKLIST_ENTRIES*4)-1))
+#define QUICKLIST_FULL(_l) QUICKLIST_EMPTY((_l)+1)
+#define pgd_quicklist (current_cpu_data.pgd_quick)
+#define pmd_quicklist (current_cpu_data.pmd_quick)
+#define pte_quicklist (current_cpu_data.pte_quick)
+#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
+
+#define pmd_populate(mm, pmd, pte) \
+ do { \
+ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \
+ XENO_flush_page_update_queue(); \
+ } while ( 0 )
+
+/*
+ * Allocate and free page tables.
+ */
+
+#if defined (CONFIG_X86_PAE)
+
+#error "no PAE support as yet"
+
+/*
+ * We can't include <linux/slab.h> here, thus these uglinesses.
+ */
+struct kmem_cache_s;
+
+extern struct kmem_cache_s *pae_pgd_cachep;
+extern void *kmem_cache_alloc(struct kmem_cache_s *, int);
+extern void kmem_cache_free(struct kmem_cache_s *, void *);
+
+
+static inline pgd_t *get_pgd_slow(void)
+{
+ int i;
+ pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, GFP_KERNEL);
+
+ if (pgd) {
+ for (i = 0; i < USER_PTRS_PER_PGD; i++) {
+ unsigned long pmd = __get_free_page(GFP_KERNEL);
+ if (!pmd)
+ goto out_oom;
+ clear_page(pmd);
+ set_pgd(pgd + i, __pgd(1 + __pa(pmd)));
+ }
+ memcpy(pgd + USER_PTRS_PER_PGD,
+ swapper_pg_dir + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+ return pgd;
+out_oom:
+ for (i--; i >= 0; i--)
+ free_page((unsigned long)__va(pgd_val(pgd[i])-1));
+ kmem_cache_free(pae_pgd_cachep, pgd);
+ return NULL;
+}
+
+#else
+
+static inline pgd_t *get_pgd_slow(void)
+{
+ pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
+
+ if (pgd) {
+ memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy(pgd + USER_PTRS_PER_PGD,
+ init_mm.pgd + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ __make_page_readonly(pgd);
+ queue_pgd_pin(__pa(pgd));
+
+ }
+ return pgd;
+}
+
+#endif /* CONFIG_X86_PAE */
+
+static inline pgd_t *get_pgd_fast(void)
+{
+ unsigned long ret;
+
+ if ( !QUICKLIST_EMPTY(pgd_quicklist) ) {
+ ret = *(--pgd_quicklist);
+ pgtable_cache_size--;
+
+ } else
+ ret = (unsigned long)get_pgd_slow();
+ return (pgd_t *)ret;
+}
+
+static inline void free_pgd_slow(pgd_t *pgd)
+{
+#if defined(CONFIG_X86_PAE)
+#error
+ int i;
+
+ for (i = 0; i < USER_PTRS_PER_PGD; i++)
+ free_page((unsigned long)__va(pgd_val(pgd[i])-1));
+ kmem_cache_free(pae_pgd_cachep, pgd);
+#else
+ queue_pgd_unpin(__pa(pgd));
+ __make_page_writeable(pgd);
+ free_page((unsigned long)pgd);
+#endif
+}
+
+static inline void free_pgd_fast(pgd_t *pgd)
+{
+ if ( !QUICKLIST_FULL(pgd_quicklist) ) {
+ *(pgd_quicklist++) = (unsigned long)pgd;
+ pgtable_cache_size++;
+ } else
+ free_pgd_slow(pgd);
+}
+
+static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pte_t *pte;
+
+ pte = (pte_t *) __get_free_page(GFP_KERNEL);
+ if (pte)
+ {
+ clear_page(pte);
+ __make_page_readonly(pte);
+ queue_pte_pin(__pa(pte));
+ }
+ return pte;
+
+}
+
+static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
+ unsigned long address)
+{
+ unsigned long ret = 0;
+ if ( !QUICKLIST_EMPTY(pte_quicklist) ) {
+ ret = *(--pte_quicklist);
+ pgtable_cache_size--;
+ }
+ return (pte_t *)ret;
+}
+
+static __inline__ void pte_free_slow(pte_t *pte)
+{
+ queue_pte_unpin(__pa(pte));
+ __make_page_writeable(pte);
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free_fast(pte_t *pte)
+{
+ if ( !QUICKLIST_FULL(pte_quicklist) ) {
+ *(pte_quicklist++) = (unsigned long)pte;
+ pgtable_cache_size++;
+ } else
+ pte_free_slow(pte);
+}
+
+#define pte_free(pte) pte_free_fast(pte)
+#define pgd_free(pgd) free_pgd_fast(pgd)
+#define pgd_alloc(mm) get_pgd_fast()
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ * (In the PAE case we free the pmds as part of the pgd.)
+ */
+
+#define pmd_alloc_one_fast(mm, addr) ({ BUG(); ((pmd_t *)1); })
+#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
+#define pmd_free_slow(x) do { } while (0)
+#define pmd_free_fast(x) do { } while (0)
+#define pmd_free(x) do { } while (0)
+#define pgd_populate(mm, pmd, pte) BUG()
+
+extern int do_check_pgt_cache(int, int);
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb() flushes the current mm struct TLBs
+ * - flush_tlb_all() flushes all processes TLBs
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(mm, start, end) flushes a range of pages
+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ *
+ * ..but the i386 has somewhat limited tlb flushing capabilities,
+ * and page-granular flushes are available only on i486 and up.
+ */
+
+#ifndef CONFIG_SMP
+
+#define flush_tlb() __flush_tlb()
+#define flush_tlb_all() __flush_tlb_all()
+#define local_flush_tlb() __flush_tlb()
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (mm == current->active_mm) queue_tlb_flush();
+ XENO_flush_page_update_queue();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ if (vma->vm_mm == current->active_mm) queue_invlpg(addr);
+ XENO_flush_page_update_queue();
+}
+
+static inline void flush_tlb_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ if (mm == current->active_mm) queue_tlb_flush();
+ XENO_flush_page_update_queue();
+}
+
+#else
+#error no guestos SMP support yet...
+#include <asm/smp.h>
+
+#define local_flush_tlb() \
+ __flush_tlb()
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+
+#define flush_tlb() flush_tlb_current_task()
+
+static inline void flush_tlb_range(struct mm_struct * mm, unsigned long start, unsigned long end)
+{
+ flush_tlb_mm(mm);
+}
+
+#define TLBSTATE_OK 1
+#define TLBSTATE_LAZY 2
+
+struct tlb_state
+{
+ struct mm_struct *active_mm;
+ int state;
+} ____cacheline_aligned;
+extern struct tlb_state cpu_tlbstate[NR_CPUS];
+
+#endif /* CONFIG_SMP */
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ /* i386 does not keep any page table caches in TLB */
+ XENO_flush_page_update_queue();
+}
+
+extern int direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long machine_addr,
+ unsigned long size,
+ pgprot_t prot);
+
+#endif /* _I386_PGALLOC_H */
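[Illustrative aside, not part of the patch above: the slow-path allocators show the extra obligations a Xen guest has, namely that a new page table must be made read-only in the guest's own mappings and pinned before use, with the inverse on free. A sketch of wiring a freshly allocated pte page under a pmd; 'mm', 'pmd' and 'addr' are hypothetical.]

    /* Sketch: allocate a pte page (cleared, made read-only and pinned by
     * pte_alloc_one) and install it; pmd_populate queues the L2 update and
     * flushes the queue. */
    static pte_t *example_install_pte_page(struct mm_struct *mm, pmd_t *pmd,
                                           unsigned long addr)
    {
        pte_t *pte = pte_alloc_one(mm, addr);
        if ( pte == NULL )
            return NULL;
        pmd_populate(mm, pmd, pte);
        return pte;
    }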
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/pgtable-2level.h b/xenolinux-2.4.23-sparse/include/asm-xeno/pgtable-2level.h
new file mode 100644
index 0000000000..c780f644c0
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/pgtable-2level.h
@@ -0,0 +1,71 @@
+#ifndef _I386_PGTABLE_2LEVEL_H
+#define _I386_PGTABLE_2LEVEL_H
+
+/*
+ * traditional i386 two-level paging structure:
+ */
+
+#define PGDIR_SHIFT 22
+#define PTRS_PER_PGD 1024
+
+/*
+ * the i386 is two-level, so we don't really have any
+ * PMD directory physically.
+ */
+#define PMD_SHIFT 22
+#define PTRS_PER_PMD 1
+
+#define PTRS_PER_PTE 1024
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
+static inline int pgd_present(pgd_t pgd) { return 1; }
+#define pgd_clear(xp) do { } while (0)
+
+#define set_pte(pteptr, pteval) queue_l1_entry_update(pteptr, (pteval).pte_low)
+#define set_pte_atomic(pteptr, pteval) queue_l1_entry_update(pteptr, (pteval).pte_low)
+#define set_pmd(pmdptr, pmdval) queue_l2_entry_update((pmdptr), (pmdval).pmd)
+#define set_pgd(pgdptr, pgdval) ((void)0)
+
+#define pgd_page(pgd) \
+((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
+
+static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
+{
+ return (pmd_t *) dir;
+}
+
+/*
+ * A note on implementation of this atomic 'get-and-clear' operation.
+ * This is actually very simple because XenoLinux can only run on a single
+ * processor. Therefore, we cannot race other processors setting the 'accessed'
+ * or 'dirty' bits on a page-table entry.
+ * Even if pages are shared between domains, that is not a problem because
+ * each domain will have separate page tables, with their own versions of
+ * accessed & dirty state.
+ */
+static inline pte_t ptep_get_and_clear(pte_t *xp)
+{
+ pte_t pte = *xp;
+ queue_l1_entry_update(xp, 0);
+ return pte;
+}
+
+#define pte_same(a, b) ((a).pte_low == (b).pte_low)
+#define pte_page(x) (mem_map+((unsigned long)((pte_val(x) >> PAGE_SHIFT))))
+#define pte_none(x) (!(x).pte_low)
+#define __mk_pte(page_nr,pgprot) __pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
+
+#endif /* _I386_PGTABLE_2LEVEL_H */
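[Illustrative aside, not part of the patch above: set_pte()/set_pmd() never write the entry directly; they queue the new value so Xen can validate it, and ptep_get_and_clear() relies on the single-CPU guarantee described in the comment. A sketch of replacing a mapping with these primitives; the arguments are hypothetical.]

    /* Sketch: take a pte away and install a replacement, then push the
     * queued updates to Xen in one batch. */
    static void example_replace_pte(pte_t *ptep, pte_t new_pte)
    {
        pte_t old = ptep_get_and_clear(ptep);  /* queues an update to 0 */

        if ( !pte_same(old, new_pte) )
            set_pte(ptep, new_pte);            /* queues the replacement */

        XENO_flush_page_update_queue();
    }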
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/pgtable.h b/xenolinux-2.4.23-sparse/include/asm-xeno/pgtable.h
new file mode 100644
index 0000000000..07087bdf39
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/pgtable.h
@@ -0,0 +1,370 @@
+#ifndef _I386_PGTABLE_H
+#define _I386_PGTABLE_H
+
+#include <linux/config.h>
+
+/*
+ * The Linux memory management assumes a three-level page table setup. On
+ * the i386, we use that, but "fold" the mid level into the top-level page
+ * table, so that we physically have the same two-level page table as the
+ * i386 mmu expects.
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the i386 page table tree.
+ */
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <asm/hypervisor.h>
+#include <linux/threads.h>
+#include <asm/fixmap.h>
+
+#ifndef _I386_BITOPS_H
+#include <asm/bitops.h>
+#endif
+
+#define swapper_pg_dir 0
+extern void paging_init(void);
+
+/* Caches aren't brain-dead on the intel. */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_range(mm, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr) do { } while (0)
+#define flush_page_to_ram(page) do { } while (0)
+#define flush_dcache_page(page) do { } while (0)
+#define flush_icache_range(start, end) do { } while (0)
+#define flush_icache_page(vma,pg) do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
+
+extern unsigned long pgkern_mask;
+
+#define __flush_tlb() ({ queue_tlb_flush(); XENO_flush_page_update_queue(); })
+#define __flush_tlb_global() __flush_tlb()
+#define __flush_tlb_all() __flush_tlb_global()
+#define __flush_tlb_one(addr) ({ queue_invlpg(addr); XENO_flush_page_update_queue(); })
+#define __flush_tlb_single(addr) ({ queue_invlpg(addr); XENO_flush_page_update_queue(); })
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[1024];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * The Linux x86 paging architecture is 'compile-time dual-mode', it
+ * implements both the traditional 2-level x86 page tables and the
+ * newer 3-level PAE-mode page tables.
+ */
+#ifndef __ASSEMBLY__
+#if CONFIG_X86_PAE
+# include <asm/pgtable-3level.h>
+
+/*
+ * Need to initialise the X86 PAE caches
+ */
+extern void pgtable_cache_init(void);
+
+#else
+# include <asm/pgtable-2level.h>
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do { } while (0)
+
+#endif
+#endif
+
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_PGD_NR 0
+
+#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
+#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
+
+#define TWOLEVEL_PGDIR_SHIFT 22
+#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
+#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
+
+
+#ifndef __ASSEMBLY__
+/* 4MB is just a nice "safety zone". Also, we align to a fresh pde. */
+#define VMALLOC_OFFSET (4*1024*1024)
+extern void * high_memory;
+#define VMALLOC_START (((unsigned long) high_memory + 2*VMALLOC_OFFSET-1) & \
+ ~(VMALLOC_OFFSET-1))
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+#define VMALLOC_END (FIXADDR_START - 2*PAGE_SIZE)
+
+#define _PAGE_BIT_PRESENT 0
+#define _PAGE_BIT_RW 1
+#define _PAGE_BIT_USER 2
+#define _PAGE_BIT_PWT 3
+#define _PAGE_BIT_PCD 4
+#define _PAGE_BIT_ACCESSED 5
+#define _PAGE_BIT_DIRTY 6
+#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */
+#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+#define _PAGE_BIT_IO 9
+
+#define _PAGE_PRESENT 0x001
+#define _PAGE_RW 0x002
+#define _PAGE_USER 0x004
+#define _PAGE_PWT 0x008
+#define _PAGE_PCD 0x010
+#define _PAGE_ACCESSED 0x020
+#define _PAGE_DIRTY 0x040
+#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */
+#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */
+#define _PAGE_IO 0x200
+
+#define _PAGE_PROTNONE 0x080 /* If not present */
+
+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+
+#define __PAGE_KERNEL \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define __PAGE_KERNEL_NOCACHE \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)
+#define __PAGE_KERNEL_RO \
+ (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
+
+#if 0
+#define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)
+#else
+#define MAKE_GLOBAL(x) __pgprot(x)
+#endif
+
+#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
+
+/*
+ * The i386 can't do page protection for execute, and considers that
+ * the same as a read. Also, write permissions imply read permissions.
+ * This is the closest we can get.
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED
+
+#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_clear(xp) queue_l1_entry_update(xp, 0)
+
+#define pmd_none(x) (!(x).pmd)
+#define pmd_present(x) ((x).pmd & _PAGE_PRESENT)
+#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
+#define pmd_bad(x) (((x).pmd & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+
+
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
+static inline int pte_exec(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
+static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
+static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
+static inline int pte_io(pte_t pte) { return (pte).pte_low & _PAGE_IO; }
+
+static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
+static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
+static inline pte_t pte_mkio(pte_t pte) { (pte).pte_low |= _PAGE_IO; return pte; }
+
+static inline int ptep_test_and_clear_dirty(pte_t *ptep)
+{
+ unsigned long pteval = *(unsigned long *)ptep;
+ int ret = pteval & _PAGE_DIRTY;
+ if ( ret ) queue_l1_entry_update(ptep, pteval & ~_PAGE_DIRTY);
+ return ret;
+}
+static inline int ptep_test_and_clear_young(pte_t *ptep)
+{
+ unsigned long pteval = *(unsigned long *)ptep;
+ int ret = pteval & _PAGE_ACCESSED;
+ if ( ret ) queue_l1_entry_update(ptep, pteval & ~_PAGE_ACCESSED);
+ return ret;
+}
+static inline void ptep_set_wrprotect(pte_t *ptep)
+{
+ unsigned long pteval = *(unsigned long *)ptep;
+ if ( (pteval & _PAGE_RW) )
+ queue_l1_entry_update(ptep, pteval & ~_PAGE_RW);
+}
+static inline void ptep_mkdirty(pte_t *ptep)
+{
+ unsigned long pteval = *(unsigned long *)ptep;
+ if ( !(pteval & _PAGE_DIRTY) )
+ queue_l1_entry_update(ptep, pteval | _PAGE_DIRTY);
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define mk_pte(page, pgprot) __mk_pte((page) - mem_map, (pgprot))
+
+/* This takes a physical page address that is used by the remapping functions */
+#define mk_pte_phys(physpage, pgprot) __mk_pte((physpage) >> PAGE_SHIFT, pgprot)
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte.pte_low &= _PAGE_CHG_MASK;
+ pte.pte_low |= pgprot_val(newprot);
+ return pte;
+}
+
+#define page_pte(page) page_pte_prot(page, __pgprot(0))
+
+#define pmd_page(pmd) \
+((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+/* to find an entry in a page-table-directory. */
+#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+
+#define __pgd_offset(address) pgd_index(address)
+
+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+#define __pmd_offset(address) \
+ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+/* Find an entry in the third-level page table.. */
+#define __pte_offset(address) \
+ ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset(dir, address) ((pte_t *) pmd_page(*(dir)) + \
+ __pte_offset(address))
+
+/*
+ * The i386 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ */
+#define update_mmu_cache(vma,address,pte) do { } while (0)
+
+/* Encode and de-code a swap entry */
+#define SWP_TYPE(x) (((x).val >> 1) & 0x3f)
+#define SWP_OFFSET(x) ((x).val >> 8)
+#define SWP_ENTRY(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
+#define pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
+#define swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+struct page;
+int change_page_attr(struct page *, int, pgprot_t prot);
+
+static inline void __make_page_readonly(void *va)
+{
+ pgd_t *pgd = pgd_offset_k((unsigned long)va);
+ pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+ pte_t *pte = pte_offset(pmd, (unsigned long)va);
+ queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
+}
+
+static inline void __make_page_writeable(void *va)
+{
+ pgd_t *pgd = pgd_offset_k((unsigned long)va);
+ pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+ pte_t *pte = pte_offset(pmd, (unsigned long)va);
+ queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
+}
+
+static inline void make_page_readonly(void *va)
+{
+ pgd_t *pgd = pgd_offset_k((unsigned long)va);
+ pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+ pte_t *pte = pte_offset(pmd, (unsigned long)va);
+ queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
+ if ( (unsigned long)va >= VMALLOC_START )
+ __make_page_readonly(machine_to_virt(
+ *(unsigned long *)pte&PAGE_MASK));
+}
+
+static inline void make_page_writeable(void *va)
+{
+ pgd_t *pgd = pgd_offset_k((unsigned long)va);
+ pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+ pte_t *pte = pte_offset(pmd, (unsigned long)va);
+ queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
+ if ( (unsigned long)va >= VMALLOC_START )
+ __make_page_writeable(machine_to_virt(
+ *(unsigned long *)pte&PAGE_MASK));
+}
+
+static inline void make_pages_readonly(void *va, unsigned int nr)
+{
+ while ( nr-- != 0 )
+ {
+ make_page_readonly(va);
+ va = (void *)((unsigned long)va + PAGE_SIZE);
+ }
+}
+
+static inline void make_pages_writeable(void *va, unsigned int nr)
+{
+ while ( nr-- != 0 )
+ {
+ make_page_writeable(va);
+ va = (void *)((unsigned long)va + PAGE_SIZE);
+ }
+}
+
+static inline unsigned long arbitrary_virt_to_phys(void *va)
+{
+ pgd_t *pgd = pgd_offset_k((unsigned long)va);
+ pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+ pte_t *pte = pte_offset(pmd, (unsigned long)va);
+ unsigned long pa = (*(unsigned long *)pte) & PAGE_MASK;
+ return pa | ((unsigned long)va & (PAGE_SIZE-1));
+}
+
+#endif /* !__ASSEMBLY__ */
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+#define PageSkip(page) (0)
+#define kern_addr_valid(addr) (1)
+
+#define io_remap_page_range remap_page_range
+
+#endif /* _I386_PGTABLE_H */
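An aside on the swap-entry macros above: the swap type lands in bits 1-6 and the offset from bit 8 upwards, leaving bit 0 (the present bit) clear so a swapped-out PTE never looks present. A minimal user-space sketch of that layout, with the typedef and macros copied here only so the example compiles on its own:

    #include <assert.h>
    #include <stdio.h>

    /* Stand-ins for the kernel definitions, reproduced for illustration. */
    typedef struct { unsigned long val; } swp_entry_t;
    #define SWP_TYPE(x)             (((x).val >> 1) & 0x3f)
    #define SWP_OFFSET(x)           ((x).val >> 8)
    #define SWP_ENTRY(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })

    int main(void)
    {
        swp_entry_t e = SWP_ENTRY(5UL, 0x1234UL);

        /* Bit 0 stays clear, type occupies bits 1-6, offset starts at bit 8. */
        assert((e.val & 1) == 0);
        assert(SWP_TYPE(e) == 5);
        assert(SWP_OFFSET(e) == 0x1234);
        printf("entry=%#lx type=%lu offset=%#lx\n",
               e.val, SWP_TYPE(e), SWP_OFFSET(e));
        return 0;
    }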
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/proc_cmd.h b/xenolinux-2.4.23-sparse/include/asm-xeno/proc_cmd.h
new file mode 100644
index 0000000000..2fddd2c243
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/proc_cmd.h
@@ -0,0 +1,28 @@
+/******************************************************************************
+ * proc_cmd.h
+ *
+ * Interface to /proc/cmd and /proc/xeno/privcmd.
+ */
+
+#ifndef __PROC_CMD_H__
+#define __PROC_CMD_H__
+
+typedef struct privcmd_hypercall
+{
+ unsigned long op;
+ unsigned long arg[5];
+} privcmd_hypercall_t;
+
+typedef struct privcmd_blkmsg
+{
+ unsigned long op;
+ void *buf;
+ int buf_size;
+} privcmd_blkmsg_t;
+
+#define IOCTL_PRIVCMD_HYPERCALL \
+ _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
+#define IOCTL_PRIVCMD_BLKMSG \
+ _IOC(_IOC_NONE, 'P', 1, sizeof(privcmd_blkmsg_t))
+
+#endif /* __PROC_CMD_H__ */
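A hedged sketch of how a privileged user-space tool might drive this interface: open the privcmd node, fill in a privcmd_hypercall_t and issue IOCTL_PRIVCMD_HYPERCALL. The include path, the /proc/xeno/privcmd device path and the op number 0 are assumptions for illustration only:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <asm/proc_cmd.h>   /* privcmd_hypercall_t, IOCTL_PRIVCMD_HYPERCALL */

    int main(void)
    {
        privcmd_hypercall_t hc;
        int fd, ret;

        fd = open("/proc/xeno/privcmd", O_RDWR);
        if (fd < 0) {
            perror("open /proc/xeno/privcmd");
            return 1;
        }

        hc.op     = 0;    /* hypothetical hypercall number, placeholder only */
        hc.arg[0] = hc.arg[1] = hc.arg[2] = hc.arg[3] = hc.arg[4] = 0;

        ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &hc);
        printf("ioctl returned %d\n", ret);
        close(fd);
        return 0;
    }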
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/processor.h b/xenolinux-2.4.23-sparse/include/asm-xeno/processor.h
new file mode 100644
index 0000000000..0b4571a9da
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/processor.h
@@ -0,0 +1,481 @@
+/*
+ * include/asm-i386/processor.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+#ifndef __ASM_I386_PROCESSOR_H
+#define __ASM_I386_PROCESSOR_H
+
+#include <asm/math_emu.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/types.h>
+#include <asm/sigcontext.h>
+#include <asm/cpufeature.h>
+#include <linux/cache.h>
+#include <linux/config.h>
+#include <linux/threads.h>
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
+
+/*
+ * CPU type and hardware bug flags. Kept separately for each CPU.
+ * Members of this structure are referenced in head.S, so think twice
+ * before touching them. [mj]
+ */
+
+struct cpuinfo_x86 {
+ __u8 x86; /* CPU family */
+ __u8 x86_vendor; /* CPU vendor */
+ __u8 x86_model;
+ __u8 x86_mask;
+ char wp_works_ok; /* It doesn't on 386's */
+ char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
+ char hard_math;
+ char rfu;
+ int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
+ __u32 x86_capability[NCAPINTS];
+ char x86_vendor_id[16];
+ char x86_model_id[64];
+ int x86_cache_size; /* in KB - valid for CPUs which support this
+ call */
+ int fdiv_bug;
+ int f00f_bug;
+ int coma_bug;
+ unsigned long loops_per_jiffy;
+ unsigned long *pgd_quick;
+ unsigned long *pmd_quick;
+ unsigned long *pte_quick;
+ unsigned long pgtable_cache_sz;
+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+#define X86_VENDOR_INTEL 0
+#define X86_VENDOR_CYRIX 1
+#define X86_VENDOR_AMD 2
+#define X86_VENDOR_UMC 3
+#define X86_VENDOR_NEXGEN 4
+#define X86_VENDOR_CENTAUR 5
+#define X86_VENDOR_RISE 6
+#define X86_VENDOR_TRANSMETA 7
+#define X86_VENDOR_NSC 8
+#define X86_VENDOR_SIS 9
+#define X86_VENDOR_UNKNOWN 0xff
+
+/*
+ * capabilities of CPUs
+ */
+
+extern struct cpuinfo_x86 boot_cpu_data;
+extern struct tss_struct init_tss[NR_CPUS];
+
+#ifdef CONFIG_SMP
+extern struct cpuinfo_x86 cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else
+#define cpu_data (&boot_cpu_data)
+#define current_cpu_data boot_cpu_data
+#endif
+
+extern char ignore_irq13;
+
+extern void identify_cpu(struct cpuinfo_x86 *);
+extern void print_cpu_info(struct cpuinfo_x86 *);
+
+/*
+ * EFLAGS bits
+ */
+#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
+#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
+#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
+#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
+#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
+#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
+#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
+#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
+#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
+#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
+#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
+#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
+#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
+#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
+#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
+#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
+#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
+
+/*
+ * Generic CPUID function
+ */
+static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
+{
+ __asm__("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "0" (op));
+}
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+ unsigned int eax;
+
+ __asm__("cpuid"
+ : "=a" (eax)
+ : "0" (op)
+ : "bx", "cx", "dx");
+ return eax;
+}
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+ unsigned int eax, ebx;
+
+ __asm__("cpuid"
+ : "=a" (eax), "=b" (ebx)
+ : "0" (op)
+ : "cx", "dx" );
+ return ebx;
+}
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+ unsigned int eax, ecx;
+
+ __asm__("cpuid"
+ : "=a" (eax), "=c" (ecx)
+ : "0" (op)
+ : "bx", "dx" );
+ return ecx;
+}
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+ unsigned int eax, edx;
+
+ __asm__("cpuid"
+ : "=a" (eax), "=d" (edx)
+ : "0" (op)
+ : "bx", "cx");
+ return edx;
+}
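/*
 * A hypothetical feature probe built on the helpers above (a sketch for
 * illustration): CPUID leaf 1 reports the time-stamp counter in bit 4 of
 * EDX, so a one-line wrapper suffices.
 */
static inline int example_cpu_has_tsc(void)
{
    return (cpuid_edx(1) & (1 << 4)) != 0;
}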
+
+/*
+ * Intel CPU features in CR4
+ */
+#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
+#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
+#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
+#define X86_CR4_DE 0x0008 /* enable debugging extensions */
+#define X86_CR4_PSE 0x0010 /* enable page size extensions */
+#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
+#define X86_CR4_MCE 0x0040 /* Machine check enable */
+#define X86_CR4_PGE 0x0080 /* enable global pages */
+#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
+#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
+#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
+
+#define load_cr3(pgdir) \
+ asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)));
+
+extern unsigned long mmu_cr4_features;
+
+#include <asm/hypervisor.h>
+
+static inline void set_in_cr4 (unsigned long mask)
+{
+ HYPERVISOR_console_write("No set_in_cr4", 13);
+}
+
+static inline void clear_in_cr4 (unsigned long mask)
+{
+ HYPERVISOR_console_write("No clear_in_cr4", 15);
+}
+
+/*
+ * Cyrix CPU configuration register indexes
+ */
+#define CX86_CCR0 0xc0
+#define CX86_CCR1 0xc1
+#define CX86_CCR2 0xc2
+#define CX86_CCR3 0xc3
+#define CX86_CCR4 0xe8
+#define CX86_CCR5 0xe9
+#define CX86_CCR6 0xea
+#define CX86_CCR7 0xeb
+#define CX86_DIR0 0xfe
+#define CX86_DIR1 0xff
+#define CX86_ARR_BASE 0xc4
+#define CX86_RCR_BASE 0xdc
+
+/*
+ * Cyrix CPU indexed register access macros
+ */
+
+#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
+
+#define setCx86(reg, data) do { \
+ outb((reg), 0x22); \
+ outb((data), 0x23); \
+} while (0)
+
+/*
+ * Bus types (default is ISA, but people can check others with these..)
+ */
+#ifdef CONFIG_EISA
+extern int EISA_bus;
+#else
+#define EISA_bus (0)
+#endif
+extern int MCA_bus;
+
+/* from system description table in BIOS. Mostly for MCA use, but
+others may find it useful. */
+extern unsigned int machine_id;
+extern unsigned int machine_submodel_id;
+extern unsigned int BIOS_revision;
+extern unsigned int mca_pentium_flag;
+
+/*
+ * User space process size: 3GB (default).
+ */
+#define TASK_SIZE (PAGE_OFFSET)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
+
+/*
+ * Size of io_bitmap in longwords: 32 is ports 0-0x3ff.
+ */
+#define IO_BITMAP_SIZE 32
+#define IO_BITMAP_BYTES (IO_BITMAP_SIZE * 4)
+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
+#define INVALID_IO_BITMAP_OFFSET 0x8000
+
+struct i387_fsave_struct {
+ long cwd;
+ long swd;
+ long twd;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
+ long status; /* software status information */
+};
+
+struct i387_fxsave_struct {
+ unsigned short cwd;
+ unsigned short swd;
+ unsigned short twd;
+ unsigned short fop;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long mxcsr;
+ long reserved;
+ long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
+ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
+ long padding[56];
+} __attribute__ ((aligned (16)));
+
+struct i387_soft_struct {
+ long cwd;
+ long swd;
+ long twd;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
+ unsigned char ftop, changed, lookahead, no_update, rm, alimit;
+ struct info *info;
+ unsigned long entry_eip;
+};
+
+union i387_union {
+ struct i387_fsave_struct fsave;
+ struct i387_fxsave_struct fxsave;
+ struct i387_soft_struct soft;
+};
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+struct tss_struct {
+ unsigned short back_link,__blh;
+ unsigned long esp0;
+ unsigned short ss0,__ss0h;
+ unsigned long esp1;
+ unsigned short ss1,__ss1h;
+ unsigned long esp2;
+ unsigned short ss2,__ss2h;
+ unsigned long __cr3;
+ unsigned long eip;
+ unsigned long eflags;
+ unsigned long eax,ecx,edx,ebx;
+ unsigned long esp;
+ unsigned long ebp;
+ unsigned long esi;
+ unsigned long edi;
+ unsigned short es, __esh;
+ unsigned short cs, __csh;
+ unsigned short ss, __ssh;
+ unsigned short ds, __dsh;
+ unsigned short fs, __fsh;
+ unsigned short gs, __gsh;
+ unsigned short ldt, __ldth;
+ unsigned short trace, bitmap;
+ unsigned long io_bitmap[IO_BITMAP_SIZE+1];
+ /*
+ * pads the TSS to be cacheline-aligned (size is 0x100)
+ */
+ unsigned long __cacheline_filler[5];
+};
+
+struct thread_struct {
+ unsigned long esp0;
+ unsigned long eip;
+ unsigned long esp;
+ unsigned long fs;
+ unsigned long gs;
+ unsigned int io_pl;
+/* Hardware debugging registers */
+ unsigned long debugreg[8]; /* %%db0-7 debug registers */
+/* fault info */
+ unsigned long cr2, trap_no, error_code;
+/* floating point info */
+ union i387_union i387;
+/* virtual 86 mode info */
+ struct vm86_struct * vm86_info;
+ unsigned long screen_bitmap;
+ unsigned long v86flags, v86mask, saved_esp0;
+};
+
+#define INIT_THREAD { sizeof(init_stack) + (long) &init_stack, \
+ 0, 0, 0, 0, 0, 0, {0}, 0, 0, 0, {{0}}, 0, 0, 0, 0, 0 }
+
+#define INIT_TSS { \
+ 0,0, /* back_link, __blh */ \
+ sizeof(init_stack) + (long) &init_stack, /* esp0 */ \
+ __KERNEL_DS, 0, /* ss0 */ \
+ 0,0,0,0,0,0, /* stack1, stack2 */ \
+ 0, /* cr3 */ \
+ 0,0, /* eip,eflags */ \
+ 0,0,0,0, /* eax,ecx,edx,ebx */ \
+ 0,0,0,0, /* esp,ebp,esi,edi */ \
+ 0,0,0,0,0,0, /* es,cs,ss */ \
+ 0,0,0,0,0,0, /* ds,fs,gs */ \
+ 0,0, /* ldt */ \
+ 0, INVALID_IO_BITMAP_OFFSET, /* trace, bitmap */ \
+ {~0, } /* ioperm */ \
+}
+
+#define start_thread(regs, new_eip, new_esp) do { \
+ __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \
+ set_fs(USER_DS); \
+ regs->xds = __USER_DS; \
+ regs->xes = __USER_DS; \
+ regs->xss = __USER_DS; \
+ regs->xcs = __USER_CS; \
+ regs->eip = new_eip; \
+ regs->esp = new_esp; \
+} while (0)
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+/*
+ * create a kernel thread without removing it from tasklists
+ */
+extern int arch_kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+/* Copy and release all segment info associated with a VM */
+extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
+extern void release_segments(struct mm_struct * mm);
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+static inline unsigned long thread_saved_pc(struct thread_struct *t)
+{
+ return ((unsigned long *)t->esp)[3];
+}
+
+unsigned long get_wchan(struct task_struct *p);
+#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1019])
+#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1022])
+
+#define THREAD_SIZE (2*PAGE_SIZE)
+#define alloc_task_struct() ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
+#define free_task_struct(p) free_pages((unsigned long) (p), 1)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
+
+#define init_task (init_task_union.task)
+#define init_stack (init_task_union.stack)
+
+struct microcode {
+ unsigned int hdrver;
+ unsigned int rev;
+ unsigned int date;
+ unsigned int sig;
+ unsigned int cksum;
+ unsigned int ldrver;
+ unsigned int pf;
+ unsigned int reserved[5];
+ unsigned int bits[500];
+};
+
+/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
+#define MICROCODE_IOCFREE _IO('6',0)
+
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static inline void rep_nop(void)
+{
+ __asm__ __volatile__("rep;nop" ::: "memory");
+}
+
+#define cpu_relax() rep_nop()
+
+/* Prefetch instructions for Pentium III and AMD Athlon */
+#if defined(CONFIG_MPENTIUMIII) || defined (CONFIG_MPENTIUM4)
+
+#define ARCH_HAS_PREFETCH
+extern inline void prefetch(const void *x)
+{
+ __asm__ __volatile__ ("prefetchnta (%0)" : : "r"(x));
+}
+
+#elif CONFIG_X86_USE_3DNOW
+
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+extern inline void prefetch(const void *x)
+{
+ __asm__ __volatile__ ("prefetch (%0)" : : "r"(x));
+}
+
+extern inline void prefetchw(const void *x)
+{
+ __asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
+}
+#define spin_lock_prefetch(x) prefetchw(x)
+
+#endif
+
+#define TF_MASK 0x100
+
+#endif /* __ASM_I386_PROCESSOR_H */
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/ptrace.h b/xenolinux-2.4.23-sparse/include/asm-xeno/ptrace.h
new file mode 100644
index 0000000000..4457ac0b17
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/ptrace.h
@@ -0,0 +1,63 @@
+#ifndef _I386_PTRACE_H
+#define _I386_PTRACE_H
+
+#define EBX 0
+#define ECX 1
+#define EDX 2
+#define ESI 3
+#define EDI 4
+#define EBP 5
+#define EAX 6
+#define DS 7
+#define ES 8
+#define FS 9
+#define GS 10
+#define ORIG_EAX 11
+#define EIP 12
+#define CS 13
+#define EFL 14
+#define UESP 15
+#define SS 16
+#define FRAME_SIZE 17
+
+/* this struct defines the way the registers are stored on the
+ stack during a system call. */
+
+struct pt_regs {
+ long ebx;
+ long ecx;
+ long edx;
+ long esi;
+ long edi;
+ long ebp;
+ long eax;
+ int xds;
+ int xes;
+ long orig_eax;
+ long eip;
+ int xcs;
+ long eflags;
+ long esp;
+ int xss;
+};
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+#define PTRACE_GETFPREGS 14
+#define PTRACE_SETFPREGS 15
+#define PTRACE_GETFPXREGS 18
+#define PTRACE_SETFPXREGS 19
+
+#define PTRACE_SETOPTIONS 21
+
+/* options set using PTRACE_SETOPTIONS */
+#define PTRACE_O_TRACESYSGOOD 0x00000001
+
+#ifdef __KERNEL__
+#define user_mode(regs) ((regs) && (2 & (regs)->xcs))
+#define instruction_pointer(regs) ((regs) ? (regs)->eip : NULL)
+extern void show_regs(struct pt_regs *);
+#endif
+
+#endif
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/segment.h b/xenolinux-2.4.23-sparse/include/asm-xeno/segment.h
new file mode 100644
index 0000000000..ca13028ce0
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/segment.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#endif
+#include <asm/hypervisor-ifs/hypervisor-if.h>
+
+#define __KERNEL_CS FLAT_RING1_CS
+#define __KERNEL_DS FLAT_RING1_DS
+
+#define __USER_CS FLAT_RING3_CS
+#define __USER_DS FLAT_RING3_DS
+
+#endif
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/smp.h b/xenolinux-2.4.23-sparse/include/asm-xeno/smp.h
new file mode 100644
index 0000000000..804b93c332
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/smp.h
@@ -0,0 +1,102 @@
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+/*
+ * We need the APIC definitions automatically as part of 'smp.h'
+ */
+#ifndef __ASSEMBLY__
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/ptrace.h>
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#ifndef __ASSEMBLY__
+#include <asm/bitops.h>
+#include <asm/mpspec.h>
+#ifdef CONFIG_X86_IO_APIC
+#include <asm/io_apic.h>
+#endif
+#include <asm/apic.h>
+#endif
+#endif
+
+#ifdef CONFIG_SMP
+#ifndef __ASSEMBLY__
+
+/*
+ * Private routines/data
+ */
+
+extern void smp_alloc_memory(void);
+extern unsigned long phys_cpu_present_map;
+extern unsigned long cpu_online_map;
+extern volatile unsigned long smp_invalidate_needed;
+extern int pic_mode;
+extern int smp_num_siblings;
+extern int cpu_sibling_map[];
+
+extern void smp_flush_tlb(void);
+extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
+extern void smp_send_reschedule(int cpu);
+extern void smp_invalidate_rcv(void); /* Process an NMI */
+extern void (*mtrr_hook) (void);
+extern void zap_low_mappings (void);
+
+/*
+ * On x86 all CPUs are mapped 1:1 to the APIC space.
+ * This simplifies scheduling and IPI sending and
+ * compresses data structures.
+ */
+static inline int cpu_logical_map(int cpu)
+{
+ return cpu;
+}
+static inline int cpu_number_map(int cpu)
+{
+ return cpu;
+}
+
+/*
+ * Some lowlevel functions might want to know about
+ * the real APIC ID <-> CPU # mapping.
+ */
+#define MAX_APICID 256
+extern volatile int cpu_to_physical_apicid[NR_CPUS];
+extern volatile int physical_apicid_to_cpu[MAX_APICID];
+extern volatile int cpu_to_logical_apicid[NR_CPUS];
+extern volatile int logical_apicid_to_cpu[MAX_APICID];
+
+/*
+ * General functions that each host system must provide.
+ */
+
+extern void smp_boot_cpus(void);
+extern void smp_store_cpu_info(int id); /* Store per CPU info (like the initial udelay numbers) */
+
+/*
+ * This function is needed by all SMP systems. It must _always_ be valid
+ * from the initial startup. We map APIC_BASE very early in page_setup(),
+ * so this is correct in the x86 case.
+ */
+
+#define smp_processor_id() (current->processor)
+
+#endif /* !__ASSEMBLY__ */
+
+#define NO_PROC_ID 0xFF /* No processor magic marker */
+
+/*
+ * This magic constant controls our willingness to transfer
+ * a process across CPUs. Such a transfer incurs misses on the L1
+ * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
+ * gut feeling is this will vary by board in value. For a board
+ * with separate L2 cache it probably depends also on the RSS, and
+ * for a board with shared L2 cache it ought to decay fast as other
+ * processes are run.
+ */
+
+#define PROC_CHANGE_PENALTY 15 /* Schedule penalty */
+
+#endif
+#endif
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/suspend.h b/xenolinux-2.4.23-sparse/include/asm-xeno/suspend.h
new file mode 100644
index 0000000000..337290dc95
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/suspend.h
@@ -0,0 +1,25 @@
+/******************************************************************************
+ * suspend.h
+ *
+ * NB. This file is part of the Xenolinux interface with Xenoserver control
+ * software. It can be included in such software without invoking the GPL.
+ *
+ * Copyright (c) 2003, K A Fraser
+ */
+
+#ifndef __ASM_XENO_SUSPEND_H__
+#define __ASM_XENO_SUSPEND_H__
+
+typedef struct suspend_record_st {
+ /* To be filled in before resume. */
+ start_info_t resume_info;
+ /*
+ * The number of a machine frame containing, in sequence, the number of
+ * each machine frame that contains PFN -> MFN translation table data.
+ */
+ unsigned long pfn_to_mfn_frame_list;
+ /* Number of entries in the PFN -> MFN translation table. */
+ unsigned long nr_pfns;
+} suspend_record_t;
+
+#endif /* __ASM_XENO_SUSPEND_H__ */
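A small aside on the record above: with 4 KB frames and 4-byte table entries (as on i386), the PFN-to-MFN table spans ceil(nr_pfns / 1024) machine frames, which is how many entries of pfn_to_mfn_frame_list a control tool would need to follow. A standalone sketch of that arithmetic, under those stated assumptions:

    #include <stdio.h>

    /* How many 4 KB machine frames does a PFN->MFN table with nr_pfns
     * 4-byte entries occupy?  (1024 entries fit in one frame.) */
    static unsigned long pfn_table_frames(unsigned long nr_pfns)
    {
        const unsigned long entries_per_frame = 4096 / 4;
        return (nr_pfns + entries_per_frame - 1) / entries_per_frame;
    }

    int main(void)
    {
        printf("%lu frames for 300000 pfns\n", pfn_table_frames(300000));
        return 0;
    }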
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/system.h b/xenolinux-2.4.23-sparse/include/asm-xeno/system.h
new file mode 100644
index 0000000000..3b59252ca3
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/system.h
@@ -0,0 +1,400 @@
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/segment.h>
+#include <asm/hypervisor.h>
+#include <linux/bitops.h> /* for LOCK_PREFIX */
+
+#ifdef __KERNEL__
+
+struct task_struct;
+extern void FASTCALL(__switch_to(struct task_struct *prev,
+ struct task_struct *next));
+
+#define prepare_to_switch() \
+do { \
+ struct thread_struct *__t = &current->thread; \
+ __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (*(int *)&__t->fs) ); \
+ __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (*(int *)&__t->gs) ); \
+} while (0)
+#define switch_to(prev,next,last) do { \
+ asm volatile("pushl %%esi\n\t" \
+ "pushl %%edi\n\t" \
+ "pushl %%ebp\n\t" \
+ "movl %%esp,%0\n\t" /* save ESP */ \
+ "movl %3,%%esp\n\t" /* restore ESP */ \
+ "movl $1f,%1\n\t" /* save EIP */ \
+ "pushl %4\n\t" /* restore EIP */ \
+ "jmp __switch_to\n" \
+ "1:\t" \
+ "popl %%ebp\n\t" \
+ "popl %%edi\n\t" \
+ "popl %%esi\n\t" \
+ :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
+ "=b" (last) \
+ :"m" (next->thread.esp),"m" (next->thread.eip), \
+ "a" (prev), "d" (next), \
+ "b" (prev)); \
+} while (0)
+
+#define _set_base(addr,base) do { unsigned long __pr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %%dl,%2\n\t" \
+ "movb %%dh,%3" \
+ :"=&d" (__pr) \
+ :"m" (*((addr)+2)), \
+ "m" (*((addr)+4)), \
+ "m" (*((addr)+7)), \
+ "0" (base) \
+ ); } while(0)
+
+#define _set_limit(addr,limit) do { unsigned long __lr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %2,%%dh\n\t" \
+ "andb $0xf0,%%dh\n\t" \
+ "orb %%dh,%%dl\n\t" \
+ "movb %%dl,%2" \
+ :"=&d" (__lr) \
+ :"m" (*(addr)), \
+ "m" (*((addr)+6)), \
+ "0" (limit) \
+ ); } while(0)
+
+#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
+
+static inline unsigned long _get_base(char * addr)
+{
+ unsigned long __base;
+ __asm__("movb %3,%%dh\n\t"
+ "movb %2,%%dl\n\t"
+ "shll $16,%%edx\n\t"
+ "movw %1,%%dx"
+ :"=&d" (__base)
+ :"m" (*((addr)+2)),
+ "m" (*((addr)+4)),
+ "m" (*((addr)+7)));
+ return __base;
+}
+
+#define get_base(ldt) _get_base( ((char *)&(ldt)) )
+
+/*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong..
+ */
+#define loadsegment(seg,value) \
+ asm volatile("\n" \
+ "1:\t" \
+ "movl %0,%%" #seg "\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3:\t" \
+ "pushl $0\n\t" \
+ "popl %%" #seg "\n\t" \
+ "jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 4\n\t" \
+ ".long 1b,3b\n" \
+ ".previous" \
+ : :"m" (*(unsigned int *)&(value)))
+
+#define clts() ((void)0)
+#define read_cr0() ({ \
+ unsigned int __dummy; \
+ __asm__( \
+ "movl %%cr0,%0\n\t" \
+ :"=r" (__dummy)); \
+ __dummy; \
+})
+#define write_cr0(x) \
+ __asm__("movl %0,%%cr0": :"r" (x));
+
+#define read_cr4() ({ \
+ unsigned int __dummy; \
+ __asm__( \
+ "movl %%cr4,%0\n\t" \
+ :"=r" (__dummy)); \
+ __dummy; \
+})
+#define write_cr4(x) \
+ __asm__("movl %0,%%cr4": :"r" (x));
+#define stts() (HYPERVISOR_fpu_taskswitch())
+
+#endif /* __KERNEL__ */
+
+#define wbinvd() \
+ __asm__ __volatile__ ("wbinvd": : :"memory");
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+ unsigned long __limit;
+ __asm__("lsll %1,%0"
+ :"=r" (__limit):"r" (segment));
+ return __limit+1;
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+#define tas(ptr) (xchg((ptr),1))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+
+/*
+ * The semantics of CMPXCHG8B are a bit strange, this is why
+ * there is a loop and the loading of %%eax and %%edx has to
+ * be inside. This inlines well in most cases, the cached
+ * cost is around ~38 cycles. (in the future we might want
+ * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
+ * might have an implicit FPU-save as a cost, so it's not
+ * clear which path to go.)
+ *
+ * cmpxchg8b must be used with the lock prefix here to allow
+ * the instruction to be executed atomically, see page 3-102
+ * of the instruction set reference 24319102.pdf. We need
+ * the reader side to see the coherent 64bit value.
+ */
+static inline void __set_64bit (unsigned long long * ptr,
+ unsigned int low, unsigned int high)
+{
+ __asm__ __volatile__ (
+ "\n1:\t"
+ "movl (%0), %%eax\n\t"
+ "movl 4(%0), %%edx\n\t"
+ "lock cmpxchg8b (%0)\n\t"
+ "jnz 1b"
+ : /* no outputs */
+ : "D"(ptr),
+ "b"(low),
+ "c"(high)
+ : "ax","dx","memory");
+}
+
+static inline void __set_64bit_constant (unsigned long long *ptr,
+ unsigned long long value)
+{
+ __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
+}
+#define ll_low(x) *(((unsigned int*)&(x))+0)
+#define ll_high(x) *(((unsigned int*)&(x))+1)
+
+static inline void __set_64bit_var (unsigned long long *ptr,
+ unsigned long long value)
+{
+ __set_64bit(ptr,ll_low(value), ll_high(value));
+}
+
+#define set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit_constant(ptr, value) : \
+ __set_64bit_var(ptr, value) )
+
+#define _set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
+ __set_64bit(ptr, ll_low(value), ll_high(value)) )
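/*
 * Hypothetical usage sketch (illustration only): set_64bit() performs the
 * whole 8-byte store atomically via the cmpxchg8b loop above, so a
 * concurrent reader of the same quadword never observes a half-written
 * value.
 */
static inline void example_publish_u64(unsigned long long *slot,
                                       unsigned long long value)
{
    set_64bit(slot, value);
}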
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ * but generally the primitive is invalid, *ptr is output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__ __volatile__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__ __volatile__("xchgl %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#ifdef CONFIG_X86_CMPXCHG
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
+#define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
+
+#else
+/* Compiling for a 386 proper. Is it worth implementing via cli/sti? */
+#endif
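/*
 * A hypothetical lock-free update built on cmpxchg() (sketch only, guarded
 * on __HAVE_ARCH_CMPXCHG): retry the read-modify-write until no other CPU
 * changed the word between the read and the compare-exchange.
 */
#ifdef __HAVE_ARCH_CMPXCHG
static inline unsigned long example_atomic_add(volatile unsigned long *p,
                                               unsigned long delta)
{
    unsigned long old, new;

    do {
        old = *p;
        new = old + delta;
    } while (cmpxchg(p, old, new) != old);
    return new;
}
#endif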
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ *
+ * For now, "wmb()" doesn't actually do anything, as all
+ * Intel CPU's follow what Intel calls a *Processor Order*,
+ * in which all writes are seen in the program order even
+ * outside the CPU.
+ *
+ * I expect future Intel CPU's to have a weaker ordering,
+ * but I'd also expect them to finally get their act together
+ * and add some real memory barriers if so.
+ *
+ * Some non-Intel clones support out-of-order store. wmb() ceases to be a
+ * nop for these.
+ */
+
+#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#define rmb() mb()
+
+#ifdef CONFIG_X86_OOSTORE
+#define wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#else
+#define wmb() __asm__ __volatile__ ("": : :"memory")
+#endif
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif
+
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+
+#define __cli() \
+do { \
+ clear_bit(EVENTS_MASTER_ENABLE_BIT, &HYPERVISOR_shared_info->events_mask);\
+ barrier(); \
+} while (0)
+
+#define __sti() \
+do { \
+ shared_info_t *_shared = HYPERVISOR_shared_info; \
+ set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask); \
+ barrier(); \
+ if ( unlikely(_shared->events) ) do_hypervisor_callback(NULL); \
+} while (0)
+
+#define __save_flags(x) \
+do { \
+ (x) = test_bit(EVENTS_MASTER_ENABLE_BIT, \
+ &HYPERVISOR_shared_info->events_mask); \
+ barrier(); \
+} while (0)
+
+#define __restore_flags(x) do { if (x) __sti(); } while (0)
+
+#define safe_halt() ((void)0)
+
+#define __save_and_cli(x) do { __save_flags(x); __cli(); } while(0);
+#define __save_and_sti(x) do { __save_flags(x); __sti(); } while(0);
+
+#define local_irq_save(x) \
+do { \
+ (x) = test_and_clear_bit(EVENTS_MASTER_ENABLE_BIT, \
+ &HYPERVISOR_shared_info->events_mask); \
+ barrier(); \
+} while (0)
+#define local_irq_restore(x) __restore_flags(x)
+#define local_irq_disable() __cli()
+#define local_irq_enable() __sti()
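/*
 * Hypothetical usage sketch: under Xen these macros mask and unmask the
 * master event-enable bit in the shared-info page rather than executing
 * cli/sti, but callers use them exactly as on native x86.
 */
static inline void example_critical_section(void)
{
    unsigned long flags;

    local_irq_save(flags);      /* mask events, remembering the old state */
    /* ... manipulate state that event callbacks also touch ... */
    local_irq_restore(flags);   /* unmask only if events were enabled before */
}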
+
+
+#ifdef CONFIG_SMP
+#error no SMP
+extern void __global_cli(void);
+extern void __global_sti(void);
+extern unsigned long __global_save_flags(void);
+extern void __global_restore_flags(unsigned long);
+#define cli() __global_cli()
+#define sti() __global_sti()
+#define save_flags(x) ((x)=__global_save_flags())
+#define restore_flags(x) __global_restore_flags(x)
+#define save_and_cli(x) do { save_flags(x); cli(); } while(0);
+#define save_and_sti(x) do { save_flags(x); sti(); } while(0);
+
+#else
+
+#define cli() __cli()
+#define sti() __sti()
+#define save_flags(x) __save_flags(x)
+#define restore_flags(x) __restore_flags(x)
+#define save_and_cli(x) __save_and_cli(x)
+#define save_and_sti(x) __save_and_sti(x)
+
+#endif
+
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
+void disable_hlt(void);
+void enable_hlt(void);
+
+extern unsigned long dmi_broken;
+extern int is_sony_vaio_laptop;
+
+#define BROKEN_ACPI_Sx 0x0001
+#define BROKEN_INIT_AFTER_S1 0x0002
+#define BROKEN_PNP_BIOS 0x0004
+
+#endif
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/vga.h b/xenolinux-2.4.23-sparse/include/asm-xeno/vga.h
new file mode 100644
index 0000000000..d0624cf480
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/vga.h
@@ -0,0 +1,42 @@
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ */
+
+#ifndef _LINUX_ASM_VGA_H_
+#define _LINUX_ASM_VGA_H_
+
+#include <asm/io.h>
+
+extern unsigned char *vgacon_mmap;
+
+static unsigned long VGA_MAP_MEM(unsigned long x)
+{
+ if( vgacon_mmap == NULL )
+ {
+ /* This is our first time in this function. This whole thing
+ is a rather grim hack. We know we're going to get asked
+ to map a 32KB region between 0xb0000 and 0xb8000 because
+ that's what VGAs are. We use the boot-time permanent
+ fixed map region and map it to machine pages.
+ */
+ if( x != 0xb8000 )
+ panic("Argghh! VGA Console is weird. 1:%08lx\n",x);
+
+ vgacon_mmap = (unsigned char*) bt_ioremap( 0xa0000, 128*1024 );
+ return (unsigned long) (vgacon_mmap+x-0xa0000);
+ }
+ else
+ {
+ if( x != 0xc0000 && x != 0xa0000 ) /* vidmem_end or charmap fonts */
+ panic("Argghh! VGA Console is weird. 2:%08lx\n",x);
+ return (unsigned long) (vgacon_mmap+x-0xa0000);
+ }
+ return 0;
+}
+
+static inline unsigned char vga_readb(unsigned char * x) { return (*(x)); }
+static inline void vga_writeb(unsigned char x, unsigned char *y) { *(y) = (x); }
+
+#endif
diff --git a/xenolinux-2.4.23-sparse/include/asm-xeno/xeno_proc.h b/xenolinux-2.4.23-sparse/include/asm-xeno/xeno_proc.h
new file mode 100644
index 0000000000..d794b733f5
--- /dev/null
+++ b/xenolinux-2.4.23-sparse/include/asm-xeno/xeno_proc.h
@@ -0,0 +1,13 @@
+
+#ifndef __ASM_XENO_PROC_H__
+#define __ASM_XENO_PROC_H__
+
+#include <linux/config.h>
+#include <linux/proc_fs.h>
+
+extern struct proc_dir_entry *create_xeno_proc_entry(
+ const char *name, mode_t mode);
+extern void remove_xeno_proc_entry(
+ const char *name);
+
+#endif /* __ASM_XENO_PROC_H__ */