path: root/xenolinux-2.4.26-sparse/include/asm-xen
author    kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>  2004-06-09 16:36:08 +0000
committer kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>  2004-06-09 16:36:08 +0000
commit    03907beb680ee39bee212bb3cb170cb295238cda (patch)
tree      f594d4df409070fe7f41cb1709f1ae22b76ac17c /xenolinux-2.4.26-sparse/include/asm-xen
parent    63cf7286eeb40cadb2581fef38376da82a25b2ba (diff)
bitkeeper revision 1.946 (40c73c7805slPvnfEohXfQoiriAESg)
Rename Linux directories and upgrade build system.
Diffstat (limited to 'xenolinux-2.4.26-sparse/include/asm-xen')
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/bugs.h              53
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/ctrl_if.h          121
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/desc.h              41
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/evtchn.h            93
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/fixmap.h           105
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/highmem.h          132
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/hw_irq.h            61
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/hypervisor.h       479
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/io.h               429
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/irq.h               59
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/keyboard.h         100
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/mmu_context.h       74
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/msr.h              138
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/multicall.h         84
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/page.h             173
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/pci.h              283
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/pgalloc.h          286
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/pgtable-2level.h    72
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/pgtable.h          374
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/proc_cmd.h          63
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/processor.h        484
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/ptrace.h            63
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/segment.h           15
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/smp.h              102
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/suspend.h           25
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/synch_bitops.h      83
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/system.h           408
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/vga.h               42
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/xen_proc.h          13
-rw-r--r--  xenolinux-2.4.26-sparse/include/asm-xen/xor.h              879
30 files changed, 0 insertions, 5334 deletions
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/bugs.h b/xenolinux-2.4.26-sparse/include/asm-xen/bugs.h
deleted file mode 100644
index c46b6a0b15..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/bugs.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * include/asm-i386/bugs.h
- *
- * Copyright (C) 1994 Linus Torvalds
- *
- * Cyrix stuff, June 1998 by:
- * - Rafael R. Reilova (moved everything from head.S),
- * <rreilova@ececs.uc.edu>
- * - Channing Corn (tests & fixes),
- * - Andrew D. Balsa (code cleanup).
- *
- * Pentium III FXSR, SSE support
- * Gareth Hughes <gareth@valinux.com>, May 2000
- */
-
-/*
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
- * Needs:
- * void check_bugs(void);
- */
-
-#include <linux/config.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-
-
-static void __init check_fpu(void)
-{
- boot_cpu_data.fdiv_bug = 0;
-}
-
-static void __init check_hlt(void)
-{
- boot_cpu_data.hlt_works_ok = 1;
-}
-
-static void __init check_bugs(void)
-{
- extern void __init boot_init_fpu(void);
-
- identify_cpu(&boot_cpu_data);
- boot_init_fpu();
-#ifndef CONFIG_SMP
- printk("CPU: ");
- print_cpu_info(&boot_cpu_data);
-#endif
- check_fpu();
- check_hlt();
- system_utsname.machine[1] = '0' +
- (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
-}
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/ctrl_if.h b/xenolinux-2.4.26-sparse/include/asm-xen/ctrl_if.h
deleted file mode 100644
index 5bc6cc22b1..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/ctrl_if.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/******************************************************************************
- * ctrl_if.h
- *
- * Management functions for special interface to the domain controller.
- *
- * Copyright (c) 2004, K A Fraser
- */
-
-#ifndef __ASM_XEN__CTRL_IF_H__
-#define __ASM_XEN__CTRL_IF_H__
-
-#include <linux/tqueue.h>
-#include <asm/hypervisor.h>
-
-typedef control_msg_t ctrl_msg_t;
-
-/*
- * Callback function type. Called for asynchronous processing of received
- * request messages, and responses to previously-transmitted request messages.
- * The parameters are (@msg, @id).
- * @msg: Original request/response message (not a copy). The message can be
- * modified in-place by the handler (e.g., a response callback can
- * turn a request message into a response message in place). The message
- * is no longer accessible after the callback handler returns -- if the
- * message is required to persist for longer then it must be copied.
- * @id: (Response callbacks only) The 'id' that was specified when the
- * original request message was queued for transmission.
- */
-typedef void (*ctrl_msg_handler_t)(ctrl_msg_t *, unsigned long);
-
-/*
- * Send @msg to the domain controller. Execute @hnd when a response is
- * received, passing the response message and the specified @id. This
- * operation will not block: it will return -EAGAIN if there is no space.
- * Notes:
- * 1. The @msg is copied if it is transmitted and so can be freed after this
- * function returns.
- * 2. If @hnd is NULL then no callback is executed.
- */
-int ctrl_if_send_message_noblock(
- ctrl_msg_t *msg,
- ctrl_msg_handler_t hnd,
- unsigned long id);
-
-/*
- * Send @msg to the domain controller. Execute @hnd when a response is
- * received, passing the response message and the specified @id. This
- * operation will block until the message is sent, or a signal is received
- * for the calling process (unless @wait_state is TASK_UNINTERRUPTIBLE).
- * Notes:
- * 1. The @msg is copied if it is transmitted and so can be freed after this
- * function returns.
- * 2. If @hnd is NULL then no callback is executed.
- */
-int ctrl_if_send_message_block(
- ctrl_msg_t *msg,
- ctrl_msg_handler_t hnd,
- unsigned long id,
- long wait_state);
-
-/*
- * Request a callback when there is /possibly/ space to immediately send a
- * message to the domain controller. This function returns 0 if there is
- * already space to transmit a message --- in this case the callback task /may/
- * still be executed. If this function returns 1 then the callback /will/ be
- * executed when space becomes available.
- */
-int ctrl_if_enqueue_space_callback(struct tq_struct *task);
-
-/*
- * Send a response (@msg) to a message from the domain controller. This will
- * never block.
- * Notes:
- * 1. The @msg is copied and so can be freed after this function returns.
- * 2. The @msg may be the original request message, modified in-place.
- */
-void ctrl_if_send_response(ctrl_msg_t *msg);
-
-/*
- * Register a receiver for typed messages from the domain controller. The
- * handler (@hnd) is called for every received message of specified @type.
- * Returns TRUE (non-zero) if the handler was successfully registered.
- * If CALLBACK_IN_BLOCKING_CONTEXT is specified in @flags then callbacks will
- * occur in a context in which it is safe to yield (i.e., process context).
- */
-#define CALLBACK_IN_BLOCKING_CONTEXT 1
-int ctrl_if_register_receiver(
- u8 type,
- ctrl_msg_handler_t hnd,
- unsigned int flags);
-
-/*
- * Unregister a receiver for typed messages from the domain controller. The
- * handler (@hnd) will not be executed after this function returns.
- */
-void ctrl_if_unregister_receiver(u8 type, ctrl_msg_handler_t hnd);
-
-/* Suspend/resume notifications. */
-void ctrl_if_suspend(void);
-void ctrl_if_resume(void);
-
-/* Start-of-day setup. */
-void ctrl_if_init(void);
-
-/*
- * Returns TRUE if there are no outstanding message requests at the domain
- * controller. This can be used to ensure that messages have really flushed
- * through when it is not possible to use the response-callback interface.
- * WARNING: If other subsystems are using the control interface then this
- * function might never return TRUE!
- */
-int ctrl_if_transmitter_empty(void); /* !! DANGEROUS FUNCTION !! */
-
-/*
- * Manually discard response messages from the domain controller.
- * WARNING: This is usually done automatically -- this function should only
- * be called when normal interrupt mechanisms are disabled!
- */
-void ctrl_if_discard_responses(void); /* !! DANGEROUS FUNCTION !! */
-
-#endif /* __ASM_XEN__CTRL_IF_H__ */
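For context, the send/receive halves of the interface deleted here compose as in
the following minimal sketch (CMSG_EXAMPLE, the handler name, and the id value
are hypothetical, error handling is elided, and the control_msg_t type/length
fields are assumed to follow the layout of this era):

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/sched.h>            /* TASK_UNINTERRUPTIBLE */
    #include <asm/ctrl_if.h>

    /* Response callback: @msg is valid only until this returns. */
    static void example_rsp(ctrl_msg_t *msg, unsigned long id)
    {
        printk("ctrl_if: response for request id=%lu\n", id);
    }

    static int example_send(void)
    {
        ctrl_msg_t msg;

        memset(&msg, 0, sizeof(msg));
        msg.type   = CMSG_EXAMPLE;      /* hypothetical message type */
        msg.length = 0;

        /* The message is copied on transmit, so a stack buffer is fine. */
        return ctrl_if_send_message_block(&msg, example_rsp, 42UL,
                                          TASK_UNINTERRUPTIBLE);
    }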
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/desc.h b/xenolinux-2.4.26-sparse/include/asm-xen/desc.h
deleted file mode 100644
index 33309a9671..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/desc.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef __ARCH_DESC_H
-#define __ARCH_DESC_H
-
-#include <asm/ldt.h>
-
-#ifndef __ASSEMBLY__
-
-struct desc_struct {
- unsigned long a,b;
-};
-
-struct Xgt_desc_struct {
- unsigned short size;
- unsigned long address __attribute__((packed));
-};
-
-extern struct desc_struct default_ldt[];
-
-static inline void clear_LDT(void)
-{
- /*
- * NB. We load the default_ldt for lcall7/27 handling on demand, as
- * it slows down context switching. No one uses it anyway.
- */
- queue_set_ldt(0, 0);
-}
-
-static inline void load_LDT(mm_context_t *pc)
-{
- void *segments = pc->ldt;
- int count = pc->size;
-
- if ( count == 0 )
- segments = NULL;
-
- queue_set_ldt((unsigned long)segments, count);
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __ARCH_DESC_H */
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/evtchn.h b/xenolinux-2.4.26-sparse/include/asm-xen/evtchn.h
deleted file mode 100644
index 128d766a34..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/evtchn.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/******************************************************************************
- * evtchn.h
- *
- * Communication via Xen event channels.
- * Also definitions for the device that demuxes notifications to userspace.
- *
- * Copyright (c) 2004, K A Fraser
- */
-
-#ifndef __ASM_EVTCHN_H__
-#define __ASM_EVTCHN_H__
-
-#include <linux/config.h>
-#include <asm/hypervisor.h>
-#include <asm/ptrace.h>
-#include <asm/synch_bitops.h>
-#include <asm/hypervisor-ifs/event_channel.h>
-
-/*
- * LOW-LEVEL DEFINITIONS
- */
-
-/* Entry point for notifications into Linux subsystems. */
-void evtchn_do_upcall(struct pt_regs *regs);
-
-/* Entry point for notifications into the userland character device. */
-void evtchn_device_upcall(int port);
-
-static inline void mask_evtchn(int port)
-{
- shared_info_t *s = HYPERVISOR_shared_info;
- synch_set_bit(port, &s->evtchn_mask[0]);
-}
-
-static inline void unmask_evtchn(int port)
-{
- shared_info_t *s = HYPERVISOR_shared_info;
-
- synch_clear_bit(port, &s->evtchn_mask[0]);
-
- /*
- * The following is basically the equivalent of 'hw_resend_irq'. Just like
- * a real IO-APIC we 'lose the interrupt edge' if the channel is masked.
- */
- if ( synch_test_bit (port, &s->evtchn_pending[0]) &&
- !synch_test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
- {
- s->vcpu_data[0].evtchn_upcall_pending = 1;
- if ( !s->vcpu_data[0].evtchn_upcall_mask )
- evtchn_do_upcall(NULL);
- }
-}
-
-static inline void clear_evtchn(int port)
-{
- shared_info_t *s = HYPERVISOR_shared_info;
- synch_clear_bit(port, &s->evtchn_pending[0]);
-}
-
-static inline void clear_evtchn_exception(int port)
-{
- shared_info_t *s = HYPERVISOR_shared_info;
- synch_clear_bit(port, &s->evtchn_exception[0]);
-}
-
-static inline void notify_via_evtchn(int port)
-{
- evtchn_op_t op;
- op.cmd = EVTCHNOP_send;
- op.u.send.local_port = port;
- (void)HYPERVISOR_event_channel_op(&op);
-}
-
-/*
- * CHARACTER-DEVICE DEFINITIONS
- */
-
-#define PORT_NORMAL 0x0000
-#define PORT_EXCEPTION 0x8000
-#define PORTIDX_MASK 0x7fff
-
-/* /dev/xen/evtchn resides at device number major=10, minor=200 */
-#define EVTCHN_MINOR 200
-
-/* /dev/xen/evtchn ioctls: */
-/* EVTCHN_RESET: Clear and reinit the event buffer. Clear error condition. */
-#define EVTCHN_RESET _IO('E', 1)
-/* EVTCHN_BIND: Bind to the specified event-channel port. */
-#define EVTCHN_BIND _IO('E', 2)
-/* EVTCHN_UNBIND: Unbind from the specified event-channel port. */
-#define EVTCHN_UNBIND _IO('E', 3)
-
-#endif /* __ASM_EVTCHN_H__ */
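The intended calling pattern for these inlines, in sketch form (the port is
assumed to have been set up via an EVTCHNOP bind operation elsewhere; function
names are illustrative):

    /* Receive side: ack the pending bit, do the work, then unmask.
     * unmask_evtchn() replays any edge lost while the port was masked. */
    static void example_port_work(int port)
    {
        clear_evtchn(port);
        /* ... process the notification ... */
        unmask_evtchn(port);
    }

    /* Send side: kick the remote end (EVTCHNOP_send). */
    static void example_port_kick(int port)
    {
        notify_via_evtchn(port);
    }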
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/fixmap.h b/xenolinux-2.4.26-sparse/include/asm-xen/fixmap.h
deleted file mode 100644
index 338bd4ba2c..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/fixmap.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * fixmap.h: compile-time virtual memory allocation
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998 Ingo Molnar
- *
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- */
-
-#ifndef _ASM_FIXMAP_H
-#define _ASM_FIXMAP_H
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <asm/apicdef.h>
-#include <asm/page.h>
-#ifdef CONFIG_HIGHMEM
-#include <linux/threads.h>
-#include <asm/kmap_types.h>
-#endif
-
-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
- * from the end of virtual memory (0xfffff000) backwards.
- * Also this lets us do fail-safe vmalloc(): we
- * can guarantee that these special addresses and
- * vmalloc()-ed addresses never overlap.
- *
- * These 'compile-time allocated' memory buffers are
- * fixed-size 4k pages (or larger if used with an increment
- * higher than 1). Use set_fixmap(idx,phys) to associate
- * physical memory with fixmap indices.
- *
- * TLB entries of such buffers will not be flushed across
- * task switches.
- */
-
-enum fixed_addresses {
-#ifdef CONFIG_HIGHMEM
- FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-#endif
- FIX_BLKRING_BASE,
- FIX_NETRING0_BASE,
- FIX_NETRING1_BASE,
- FIX_NETRING2_BASE,
- FIX_NETRING3_BASE,
- FIX_SHARED_INFO,
-
-#ifdef CONFIG_VGA_CONSOLE
-#define NR_FIX_BTMAPS 32 /* 128KB For the Dom0 VGA Console A0000-C0000 */
-#else
-#define NR_FIX_BTMAPS 1 /* in case anyone wants it in future... */
-#endif
- FIX_BTMAP_END,
- FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
- /* our bt_ioremap is permanent, unlike other architectures */
-
- __end_of_permanent_fixed_addresses,
- __end_of_fixed_addresses = __end_of_permanent_fixed_addresses
-};
-
-extern void __set_fixmap (enum fixed_addresses idx,
- unsigned long phys, pgprot_t flags);
-
-#define set_fixmap(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL)
-/*
- * Some hardware wants to get fixmapped without caching.
- */
-#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-
-extern void clear_fixmap(enum fixed_addresses idx);
-
-/*
- * used by vmalloc.c.
- *
- * Leave one empty page between vmalloc'ed areas and
- * the start of the fixmap, and leave one page empty
- * at the top of mem..
- */
-#define FIXADDR_TOP (HYPERVISOR_VIRT_START - 2*PAGE_SIZE)
-#define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
-
-#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
-
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without translation, we catch the bug with a NULL-dereference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static inline unsigned long fix_to_virt(unsigned int idx)
-{
- return __fix_to_virt(idx);
-}
-
-#endif
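The idiom these macros support, sketched below: bind a frame to a compile-time
slot, then refer to it through its constant virtual address. The function name
is illustrative, and whether the address passed is physical or machine depends
on the caller in this tree:

    /* Map the shared-info frame at its reserved fixmap slot. */
    static shared_info_t *example_map_shared_info(unsigned long mfn)
    {
        set_fixmap(FIX_SHARED_INFO, mfn << PAGE_SHIFT);
        return (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
    }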
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/highmem.h b/xenolinux-2.4.26-sparse/include/asm-xen/highmem.h
deleted file mode 100644
index 25ef32882c..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/highmem.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * highmem.h: virtual kernel memory mappings for high memory
- *
- * Used in CONFIG_HIGHMEM systems for memory pages which
- * are not addressable by direct kernel virtual addresses.
- *
- * Copyright (C) 1999 Gerhard Wichert, Siemens AG
- * Gerhard.Wichert@pdb.siemens.de
- *
- *
- * Redesigned the x86 32-bit VM architecture to deal with
- * up to 16 Terabyte physical memory. With current x86 CPUs
- * we now support up to 64 Gigabytes physical RAM.
- *
- * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
- */
-
-#ifndef _ASM_HIGHMEM_H
-#define _ASM_HIGHMEM_H
-
-#ifdef __KERNEL__
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <asm/kmap_types.h>
-#include <asm/pgtable.h>
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-#define HIGHMEM_DEBUG 1
-#else
-#define HIGHMEM_DEBUG 0
-#endif
-
-/* declarations for highmem.c */
-extern unsigned long highstart_pfn, highend_pfn;
-
-extern pte_t *kmap_pte;
-extern pgprot_t kmap_prot;
-extern pte_t *pkmap_page_table;
-
-extern void kmap_init(void) __init;
-
-/*
- * Right now we initialize only a single pte table. It can be extended
- * easily, subsequent pte tables have to be allocated in one physical
- * chunk of RAM.
- */
-#define PKMAP_BASE (HYPERVISOR_VIRT_START - (1<<23))
-#ifdef CONFIG_X86_PAE
-#define LAST_PKMAP 512
-#else
-#define LAST_PKMAP 1024
-#endif
-#define LAST_PKMAP_MASK (LAST_PKMAP-1)
-#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
-#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-
-extern void * FASTCALL(kmap_high(struct page *page, int nonblocking));
-extern void FASTCALL(kunmap_high(struct page *page));
-
-#define kmap(page) __kmap(page, 0)
-#define kmap_nonblock(page) __kmap(page, 1)
-
-static inline void *__kmap(struct page *page, int nonblocking)
-{
- if (in_interrupt())
- out_of_line_bug();
- if (page < highmem_start_page)
- return page_address(page);
- return kmap_high(page, nonblocking);
-}
-
-static inline void kunmap(struct page *page)
-{
- if (in_interrupt())
- out_of_line_bug();
- if (page < highmem_start_page)
- return;
- kunmap_high(page);
-}
-
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-static inline void *kmap_atomic(struct page *page, enum km_type type)
-{
- enum fixed_addresses idx;
- unsigned long vaddr;
-
- if (page < highmem_start_page)
- return page_address(page);
-
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#if HIGHMEM_DEBUG
- if (!pte_none(*(kmap_pte-idx)))
- out_of_line_bug();
-#endif
- set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
- __flush_tlb_one(vaddr);
-
- return (void*) vaddr;
-}
-
-static inline void kunmap_atomic(void *kvaddr, enum km_type type)
-{
-#if HIGHMEM_DEBUG
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
- if (vaddr < FIXADDR_START) // FIXME
- return;
-
- if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
- out_of_line_bug();
-
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remapping it
- */
- pte_clear(kmap_pte-idx);
- __flush_tlb_one(vaddr);
-#endif
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_HIGHMEM_H */
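The usual calling pattern, for reference (kmap() may sleep, so it is limited to
process context; kmap_atomic() is the IRQ-safe variant, as noted above; the
function name is illustrative):

    /* Copy the first byte out of a possibly-highmem page. */
    static unsigned char example_peek(struct page *page)
    {
        unsigned char *va = kmap(page);
        unsigned char b = va[0];

        kunmap(page);
        return b;
    }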
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/hw_irq.h b/xenolinux-2.4.26-sparse/include/asm-xen/hw_irq.h
deleted file mode 100644
index d99d15bd24..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/hw_irq.h
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef _ASM_HW_IRQ_H
-#define _ASM_HW_IRQ_H
-
-/*
- * linux/include/asm/hw_irq.h
- *
- * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
- */
-
-#include <linux/config.h>
-#include <linux/smp.h>
-#include <asm/atomic.h>
-#include <asm/irq.h>
-
-#define SYSCALL_VECTOR 0x80
-
-extern int irq_vector[NR_IRQS];
-
-extern atomic_t irq_err_count;
-extern atomic_t irq_mis_count;
-
-extern char _stext, _etext;
-
-extern unsigned long prof_cpu_mask;
-extern unsigned int * prof_buffer;
-extern unsigned long prof_len;
-extern unsigned long prof_shift;
-
-/*
- * x86 profiling function, SMP safe. We might want to do this in
- * assembly totally?
- */
-static inline void x86_do_profile (unsigned long eip)
-{
- if (!prof_buffer)
- return;
-
- /*
- * Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
- * (default is all CPUs.)
- */
- if (!((1<<smp_processor_id()) & prof_cpu_mask))
- return;
-
- eip -= (unsigned long) &_stext;
- eip >>= prof_shift;
- /*
- * Don't ignore out-of-bounds EIP values silently,
- * put them into the last histogram slot, so if
- * present, they will show up as a sharp peak.
- */
- if (eip > prof_len-1)
- eip = prof_len-1;
- atomic_inc((atomic_t *)&prof_buffer[eip]);
-}
-
-static inline void hw_resend_irq(struct hw_interrupt_type *h,
- unsigned int i)
-{}
-
-#endif /* _ASM_HW_IRQ_H */
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/hypervisor.h b/xenolinux-2.4.26-sparse/include/asm-xen/hypervisor.h
deleted file mode 100644
index 3ace0bae8a..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/hypervisor.h
+++ /dev/null
@@ -1,479 +0,0 @@
-/******************************************************************************
- * hypervisor.h
- *
- * Linux-specific hypervisor handling.
- *
- * Copyright (c) 2002, K A Fraser
- */
-
-#ifndef __HYPERVISOR_H__
-#define __HYPERVISOR_H__
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <asm/hypervisor-ifs/hypervisor-if.h>
-#include <asm/hypervisor-ifs/dom0_ops.h>
-#include <asm/domain_controller.h>
-#include <asm/ptrace.h>
-#include <asm/page.h>
-
-/* arch/xen/kernel/setup.c */
-union start_info_union
-{
- extended_start_info_t start_info;
- char padding[512];
-};
-extern union start_info_union start_info_union;
-#define start_info (start_info_union.start_info)
-
-/* arch/xen/mm/hypervisor.c */
-/*
- * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
- * be MACHINE addresses.
- */
-
-extern unsigned int mmu_update_queue_idx;
-
-void queue_l1_entry_update(pte_t *ptr, unsigned long val);
-void queue_l2_entry_update(pmd_t *ptr, unsigned long val);
-void queue_pt_switch(unsigned long ptr);
-void queue_tlb_flush(void);
-void queue_invlpg(unsigned long ptr);
-void queue_pgd_pin(unsigned long ptr);
-void queue_pgd_unpin(unsigned long ptr);
-void queue_pte_pin(unsigned long ptr);
-void queue_pte_unpin(unsigned long ptr);
-void queue_set_ldt(unsigned long ptr, unsigned long bytes);
-void queue_machphys_update(unsigned long mfn, unsigned long pfn);
-#define MMU_UPDATE_DEBUG 0
-
-#if MMU_UPDATE_DEBUG > 0
-typedef struct {
- void *ptr;
- unsigned long val, pteval;
- void *ptep;
- int line; char *file;
-} page_update_debug_t;
-extern page_update_debug_t update_debug_queue[];
-#define queue_l1_entry_update(_p,_v) ({ \
- update_debug_queue[mmu_update_queue_idx].ptr = (_p); \
- update_debug_queue[mmu_update_queue_idx].val = (_v); \
- update_debug_queue[mmu_update_queue_idx].line = __LINE__; \
- update_debug_queue[mmu_update_queue_idx].file = __FILE__; \
- queue_l1_entry_update((_p),(_v)); \
-})
-#define queue_l2_entry_update(_p,_v) ({ \
- update_debug_queue[mmu_update_queue_idx].ptr = (_p); \
- update_debug_queue[mmu_update_queue_idx].val = (_v); \
- update_debug_queue[mmu_update_queue_idx].line = __LINE__; \
- update_debug_queue[mmu_update_queue_idx].file = __FILE__; \
- queue_l2_entry_update((_p),(_v)); \
-})
-#endif
-
-#if MMU_UPDATE_DEBUG > 1
-#undef queue_l1_entry_update
-#undef queue_l2_entry_update
-#define queue_l1_entry_update(_p,_v) ({ \
- update_debug_queue[mmu_update_queue_idx].ptr = (_p); \
- update_debug_queue[mmu_update_queue_idx].val = (_v); \
- update_debug_queue[mmu_update_queue_idx].line = __LINE__; \
- update_debug_queue[mmu_update_queue_idx].file = __FILE__; \
- printk("L1 %s %d: %08lx (%08lx -> %08lx)\n", __FILE__, __LINE__, \
- (unsigned long)(_p), pte_val(*(_p)), \
- (unsigned long)(_v)); \
- queue_l1_entry_update((_p),(_v)); \
-})
-#define queue_l2_entry_update(_p,_v) ({ \
- update_debug_queue[mmu_update_queue_idx].ptr = (_p); \
- update_debug_queue[mmu_update_queue_idx].val = (_v); \
- update_debug_queue[mmu_update_queue_idx].line = __LINE__; \
- update_debug_queue[mmu_update_queue_idx].file = __FILE__; \
- printk("L2 %s %d: %08lx (%08lx -> %08lx)\n", __FILE__, __LINE__, \
- (unsigned long)(_p), pmd_val(*(_p)), \
- (unsigned long)(_v)); \
- queue_l2_entry_update((_p),(_v)); \
-})
-#define queue_pt_switch(_p) ({ \
- printk("PTSWITCH %s %d: %08lx\n", __FILE__, __LINE__, (_p)); \
- queue_pt_switch(_p); \
-})
-#define queue_tlb_flush() ({ \
- printk("TLB FLUSH %s %d\n", __FILE__, __LINE__); \
- queue_tlb_flush(); \
-})
-#define queue_invlpg(_p) ({ \
- printk("INVLPG %s %d: %08lx\n", __FILE__, __LINE__, (_p)); \
- queue_invlpg(_p); \
-})
-#define queue_pgd_pin(_p) ({ \
- printk("PGD PIN %s %d: %08lx\n", __FILE__, __LINE__, (_p)); \
- queue_pgd_pin(_p); \
-})
-#define queue_pgd_unpin(_p) ({ \
- printk("PGD UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p)); \
- queue_pgd_unpin(_p); \
-})
-#define queue_pte_pin(_p) ({ \
- printk("PTE PIN %s %d: %08lx\n", __FILE__, __LINE__, (_p)); \
- queue_pte_pin(_p); \
-})
-#define queue_pte_unpin(_p) ({ \
- printk("PTE UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p)); \
- queue_pte_unpin(_p); \
-})
-#define queue_set_ldt(_p,_l) ({ \
- printk("SETL LDT %s %d: %08lx %d\n", __FILE__, __LINE__, (_p), (_l)); \
- queue_set_ldt((_p), (_l)); \
-})
-#endif
-
-void _flush_page_update_queue(void);
-static inline int flush_page_update_queue(void)
-{
- unsigned int idx = mmu_update_queue_idx;
- if ( idx != 0 ) _flush_page_update_queue();
- return idx;
-}
-#define XEN_flush_page_update_queue() (_flush_page_update_queue())
-void MULTICALL_flush_page_update_queue(void);
-
-#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-/* Allocate a contiguous empty region of low memory. Return virtual start. */
-unsigned long allocate_empty_lowmem_region(unsigned long pages);
-/* Deallocate a contiguous region of low memory. Return it to the allocator. */
-void deallocate_lowmem_region(unsigned long vstart, unsigned long pages);
-#endif
-
-/*
- * Assembler stubs for hyper-calls.
- */
-
-static inline int HYPERVISOR_set_trap_table(trap_info_t *table)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_set_trap_table),
- "b" (table) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_mmu_update(mmu_update_t *req,
- int count,
- int *success_count)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_mmu_update),
- "b" (req), "c" (count), "d" (success_count) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_set_gdt),
- "b" (frame_list), "c" (entries) : "memory" );
-
-
- return ret;
-}
-
-static inline int HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_stack_switch),
- "b" (ss), "c" (esp) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_set_callbacks(
- unsigned long event_selector, unsigned long event_address,
- unsigned long failsafe_selector, unsigned long failsafe_address)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_set_callbacks),
- "b" (event_selector), "c" (event_address),
- "d" (failsafe_selector), "S" (failsafe_address) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_net_io_op(netop_t *op)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_net_io_op),
- "b" (op) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_fpu_taskswitch(void)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_fpu_taskswitch) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_yield(void)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
- "b" (SCHEDOP_yield) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_block(void)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
- "b" (SCHEDOP_block) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_exit(void)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
- "b" (SCHEDOP_exit) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_stop(unsigned long srec)
-{
- int ret;
- /* NB. On suspend, control software expects a suspend record in %esi. */
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
- "b" (SCHEDOP_stop), "S" (srec) : "memory" );
-
- return ret;
-}
-
-static inline long HYPERVISOR_set_timer_op(u64 timeout)
-{
- int ret;
- unsigned long timeout_hi = (unsigned long)(timeout>>32);
- unsigned long timeout_lo = (unsigned long)timeout;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_set_timer_op),
- "b" (timeout_hi), "c" (timeout_lo) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_dom0_op(dom0_op_t *dom0_op)
-{
- int ret;
- dom0_op->interface_version = DOM0_INTERFACE_VERSION;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_dom0_op),
- "b" (dom0_op) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_network_op(void *network_op)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_network_op),
- "b" (network_op) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_block_io_op(void *block_io_op)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_block_io_op),
- "b" (block_io_op) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_set_debugreg(int reg, unsigned long value)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_set_debugreg),
- "b" (reg), "c" (value) : "memory" );
-
- return ret;
-}
-
-static inline unsigned long HYPERVISOR_get_debugreg(int reg)
-{
- unsigned long ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_get_debugreg),
- "b" (reg) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_update_descriptor(
- unsigned long pa, unsigned long word1, unsigned long word2)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_update_descriptor),
- "b" (pa), "c" (word1), "d" (word2) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_set_fast_trap(int idx)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_set_fast_trap),
- "b" (idx) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_dom_mem_op(unsigned int op,
- unsigned long *pages,
- unsigned long nr_pages)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_dom_mem_op),
- "b" (op), "c" (pages), "d" (nr_pages) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_multicall(void *call_list, int nr_calls)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_multicall),
- "b" (call_list), "c" (nr_calls) : "memory" );
-
- return ret;
-}
-
-static inline long HYPERVISOR_kbd_op(unsigned char op, unsigned char val)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_kbd_op),
- "b" (op), "c" (val) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_update_va_mapping(
- unsigned long page_nr, pte_t new_val, unsigned long flags)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_update_va_mapping),
- "b" (page_nr), "c" ((new_val).pte_low), "d" (flags) : "memory" );
-
- if ( unlikely(ret < 0) )
- panic("Failed update VA mapping: %08lx, %08lx, %08lx",
- page_nr, (new_val).pte_low, flags);
-
- return ret;
-}
-
-static inline int HYPERVISOR_event_channel_op(void *op)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_event_channel_op),
- "b" (op) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_xen_version(int cmd)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_xen_version),
- "b" (cmd) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_console_io(int cmd, int count, char *str)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_console_io),
- "b" (cmd), "c" (count), "d" (str) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_physdev_op(void *physdev_op)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_physdev_op),
- "b" (physdev_op) : "memory" );
-
- return ret;
-}
-
-static inline int HYPERVISOR_update_va_mapping_otherdomain(
- unsigned long page_nr, pte_t new_val, unsigned long flags, domid_t domid)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_update_va_mapping_otherdomain),
- "b" (page_nr), "c" ((new_val).pte_low), "d" (flags), "S" (domid) :
- "memory" );
-
- return ret;
-}
-
-#endif /* __HYPERVISOR_H__ */
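Because the queue/flush split above means nothing reaches Xen until the queue is
flushed, a typical caller batches a PTE write with its TLB shootdown, roughly as
follows (names are illustrative; per the note above, @ptep is a physical address
and @val a machine address):

    static void example_remap(pte_t *ptep, unsigned long val,
                              unsigned long vaddr)
    {
        queue_l1_entry_update(ptep, val);
        queue_invlpg(vaddr);              /* drop the stale TLB entry */
        flush_page_update_queue();        /* updates take effect only here */
    }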
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/io.h b/xenolinux-2.4.26-sparse/include/asm-xen/io.h
deleted file mode 100644
index 5ab5fe9bfc..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/io.h
+++ /dev/null
@@ -1,429 +0,0 @@
-#ifndef _ASM_IO_H
-#define _ASM_IO_H
-
-#include <linux/config.h>
-
-/*
- * This file contains the definitions for the x86 IO instructions
- * inb/inw/inl/outb/outw/outl and the "string versions" of the same
- * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
- * versions of the single-IO instructions (inb_p/inw_p/..).
- *
- * This file is not meant to be obfuscating: it's just complicated
- * to (a) handle it all in a way that makes gcc able to optimize it
- * as well as possible and (b) trying to avoid writing the same thing
- * over and over again with slight variations and possibly making a
- * mistake somewhere.
- */
-
-/*
- * Thanks to James van Artsdalen for a better timing-fix than
- * the two short jumps: using outb's to a nonexistent port seems
- * to guarantee better timings even on fast machines.
- *
- * On the other hand, I'd like to be sure of a non-existent port:
- * I feel a bit unsafe about using 0x80 (should be safe, though)
- *
- * Linus
- */
-
- /*
- * Bit simplified and optimized by Jan Hubicka
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
- *
- * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
- * isa_read[wl] and isa_write[wl] fixed
- * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
- */
-
-#define IO_SPACE_LIMIT 0xffff
-
-#define XQUAD_PORTIO_BASE 0xfe400000
-#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */
-#define XQUAD_PORTIO_LEN 0x80000 /* Only remapping first 2 quads */
-
-#ifdef __KERNEL__
-
-#include <linux/vmalloc.h>
-
-/*
- * Temporary debugging check to catch old code using
- * unmapped ISA addresses. Will be removed in 2.4.
- */
-#if CONFIG_DEBUG_IOVIRT
- extern void *__io_virt_debug(unsigned long x, const char *file, int line);
- extern unsigned long __io_phys_debug(unsigned long x, const char *file, int line);
- #define __io_virt(x) __io_virt_debug((unsigned long)(x), __FILE__, __LINE__)
-//#define __io_phys(x) __io_phys_debug((unsigned long)(x), __FILE__, __LINE__)
-#else
- #define __io_virt(x) ((void *)(x))
-//#define __io_phys(x) __pa(x)
-#endif
-
-/**
- * virt_to_phys - map virtual addresses to physical
- * @address: address to remap
- *
- * The returned physical address is the physical (CPU) mapping for
- * the memory address given. It is only valid to use this function on
- * addresses directly mapped or allocated via kmalloc.
- *
- * This function does not give bus mappings for DMA transfers. In
- * almost all conceivable cases a device driver should not be using
- * this function
- */
-
-static inline unsigned long virt_to_phys(volatile void * address)
-{
- return __pa(address);
-}
-
-/**
- * phys_to_virt - map physical address to virtual
- * @address: address to remap
- *
- * The returned virtual address is a current CPU mapping for
- * the memory address given. It is only valid to use this function on
- * addresses that have a kernel mapping
- *
- * This function does not handle bus mappings for DMA transfers. In
- * almost all conceivable cases a device driver should not be using
- * this function
- */
-
-static inline void * phys_to_virt(unsigned long address)
-{
- return __va(address);
-}
-
-/*
- * Change "struct page" to physical address.
- */
-#ifdef CONFIG_HIGHMEM64G
-#define page_to_phys(page) ((u64)(page - mem_map) << PAGE_SHIFT)
-#else
-#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
-#endif
-
-extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-
-/**
- * ioremap - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- */
-
-static inline void * ioremap (unsigned long offset, unsigned long size)
-{
- return __ioremap(offset, size, 0);
-}
-
-/**
- * ioremap_nocache - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap_nocache performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * This version of ioremap ensures that the memory is marked uncachable
- * on the CPU as well as honouring existing caching rules from things like
- * the PCI bus. Note that there are other caches and buffers on many
- * busses. In particular, driver authors should read up on PCI writes
- *
- * It's useful if some control registers are in such an area and
- * write combining or read caching is not desirable:
- */
-
-static inline void * ioremap_nocache (unsigned long offset, unsigned long size)
-{
- return __ioremap(offset, size, _PAGE_PCD);
-}
-
-extern void iounmap(void *addr);
-
-/*
- * bt_ioremap() and bt_iounmap() are for temporary early boot-time
- * mappings, before the real ioremap() is functional.
- * A boot-time mapping is currently limited to at most 16 pages.
- */
-extern void *bt_ioremap(unsigned long offset, unsigned long size);
-extern void bt_iounmap(void *addr, unsigned long size);
-
-#define virt_to_bus(_x) phys_to_machine(virt_to_phys(_x))
-#define bus_to_virt(_x) phys_to_virt(machine_to_phys(_x))
-#define page_to_bus(_x) phys_to_machine(page_to_phys(_x))
-#define bus_to_phys(_x) machine_to_phys(_x)
-#define bus_to_page(_x) (mem_map + (bus_to_phys(_x) >> PAGE_SHIFT))
-
-/*
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the x86 architecture, we just read/write the
- * memory location directly.
- */
-
-#define readb(addr) (*(volatile unsigned char *) __io_virt(addr))
-#define readw(addr) (*(volatile unsigned short *) __io_virt(addr))
-#define readl(addr) (*(volatile unsigned int *) __io_virt(addr))
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-
-#define writeb(b,addr) (*(volatile unsigned char *) __io_virt(addr) = (b))
-#define writew(b,addr) (*(volatile unsigned short *) __io_virt(addr) = (b))
-#define writel(b,addr) (*(volatile unsigned int *) __io_virt(addr) = (b))
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-
-#define memset_io(a,b,c) __memset(__io_virt(a),(b),(c))
-#define memcpy_fromio(a,b,c) __memcpy((a),__io_virt(b),(c))
-#define memcpy_toio(a,b,c) __memcpy(__io_virt(a),(b),(c))
-
-/*
- * ISA space is 'always mapped' on a typical x86 system, no need to
- * explicitly ioremap() it. The fact that the ISA IO space is mapped
- * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
- * are physical addresses. The following constant pointer can be
- * used as the IO-area pointer (it can be iounmapped as well, so the
- * analogy with PCI is quite large):
- */
-#define __ISA_IO_base ((char *)(PAGE_OFFSET))
-
-#define isa_readb(a) readb(__ISA_IO_base + (a))
-#define isa_readw(a) readw(__ISA_IO_base + (a))
-#define isa_readl(a) readl(__ISA_IO_base + (a))
-#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
-#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
-#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
-#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
-#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
-#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
-
-
-/*
- * Again, i386 does not require mem-IO-specific functions.
- */
-
-#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(b),(c),(d))
-#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(__ISA_IO_base + (b)),(c),(d))
-
-/**
- * check_signature - find BIOS signatures
- * @io_addr: mmio address to check
- * @signature: signature block
- * @length: length of signature
- *
- * Perform a signature comparison with the mmio address io_addr. This
- * address should have been obtained by ioremap.
- * Returns 1 on a match.
- */
-
-static inline int check_signature(unsigned long io_addr,
- const unsigned char *signature, int length)
-{
- int retval = 0;
- do {
- if (readb(io_addr) != *signature)
- goto out;
- io_addr++;
- signature++;
- length--;
- } while (length);
- retval = 1;
-out:
- return retval;
-}
-
-/**
- * isa_check_signature - find BIOS signatures
- * @io_addr: mmio address to check
- * @signature: signature block
- * @length: length of signature
- *
- * Perform a signature comparison with the ISA mmio address io_addr.
- * Returns 1 on a match.
- *
- * This function is deprecated. New drivers should use ioremap and
- * check_signature.
- */
-
-
-static inline int isa_check_signature(unsigned long io_addr,
- const unsigned char *signature, int length)
-{
- int retval = 0;
- do {
- if (isa_readb(io_addr) != *signature)
- goto out;
- io_addr++;
- signature++;
- length--;
- } while (length);
- retval = 1;
-out:
- return retval;
-}
-
-/*
- * Cache management
- *
- * This needed for two cases
- * 1. Out of order aware processors
- * 2. Accidentally out of order processors (PPro errata #51)
- */
-
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
-
-static inline void flush_write_buffers(void)
-{
- __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
-}
-
-#define dma_cache_inv(_start,_size) flush_write_buffers()
-#define dma_cache_wback(_start,_size) flush_write_buffers()
-#define dma_cache_wback_inv(_start,_size) flush_write_buffers()
-
-#else
-
-/* Nothing to do */
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-#define flush_write_buffers()
-
-#endif
-
-#endif /* __KERNEL__ */
-
-#ifdef SLOW_IO_BY_JUMPING
-#define __SLOW_DOWN_IO "\njmp 1f\n1:\tjmp 1f\n1:"
-#else
-#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
-#endif
-
-#ifdef REALLY_SLOW_IO
-#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
-#else
-#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
-#endif
-
-#ifdef CONFIG_MULTIQUAD
-extern void *xquad_portio; /* Where the IO area was mapped */
-#endif /* CONFIG_MULTIQUAD */
-
-/*
- * Talk about misusing macros..
- */
-#define __OUT1(s,x) \
-static inline void out##s(unsigned x value, unsigned short port) {
-
-#define __OUT2(s,s1,s2) \
-__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
-
-#if defined (CONFIG_MULTIQUAD) && !defined(STANDALONE)
-#define __OUTQ(s,ss,x) /* Do the equivalent of the portio op on quads */ \
-static inline void out##ss(unsigned x value, unsigned short port) { \
- if (xquad_portio) \
- write##s(value, (unsigned long) xquad_portio + port); \
- else /* We're still in early boot, running on quad 0 */ \
- out##ss##_local(value, port); \
-} \
-static inline void out##ss##_quad(unsigned x value, unsigned short port, int quad) { \
- if (xquad_portio) \
- write##s(value, (unsigned long) xquad_portio + (XQUAD_PORTIO_QUAD*quad)\
- + port); \
-}
-
-#define __INQ(s,ss) /* Do the equivalent of the portio op on quads */ \
-static inline RETURN_TYPE in##ss(unsigned short port) { \
- if (xquad_portio) \
- return read##s((unsigned long) xquad_portio + port); \
- else /* We're still in early boot, running on quad 0 */ \
- return in##ss##_local(port); \
-} \
-static inline RETURN_TYPE in##ss##_quad(unsigned short port, int quad) { \
- if (xquad_portio) \
- return read##s((unsigned long) xquad_portio + (XQUAD_PORTIO_QUAD*quad)\
- + port); \
- else\
- return 0;\
-}
-#endif /* CONFIG_MULTIQUAD && !STANDALONE */
-
-#if !defined(CONFIG_MULTIQUAD) || defined(STANDALONE)
-#define __OUT(s,s1,x) \
-__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
-__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));}
-#else
-/* Make the default portio routines operate on quad 0 */
-#define __OUT(s,s1,x) \
-__OUT1(s##_local,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
-__OUT1(s##_p_local,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
-__OUTQ(s,s,x) \
-__OUTQ(s,s##_p,x)
-#endif /* !CONFIG_MULTIQUAD || STANDALONE */
-
-#define __IN1(s) \
-static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
-
-#define __IN2(s,s1,s2) \
-__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
-
-#if !defined(CONFIG_MULTIQUAD) || defined(STANDALONE)
-#define __IN(s,s1,i...) \
-__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; }
-#else
-/* Make the default portio routines operate on quad 0 */
-#define __IN(s,s1,i...) \
-__IN1(s##_local) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-__IN1(s##_p_local) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-__INQ(s,s) \
-__INQ(s,s##_p)
-#endif /* !CONFIG_MULTIQUAD || STANDALONE */
-
-#define __INS(s) \
-static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; ins" #s \
-: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-
-#define __OUTS(s) \
-static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; outs" #s \
-: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-
-#define RETURN_TYPE unsigned char
-__IN(b,"")
-#undef RETURN_TYPE
-#define RETURN_TYPE unsigned short
-__IN(w,"")
-#undef RETURN_TYPE
-#define RETURN_TYPE unsigned int
-__IN(l,"")
-#undef RETURN_TYPE
-
-__OUT(b,"b",char)
-__OUT(w,"w",short)
-__OUT(l,,int)
-
-__INS(b)
-__INS(w)
-__INS(l)
-
-__OUTS(b)
-__OUTS(w)
-__OUTS(l)
-
-#endif
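A conventional 2.4-era MMIO access pattern built on these helpers, as a sketch
(the BAR address, size, and register offsets are hypothetical):

    #include <linux/errno.h>

    static int example_mmio_probe(void)
    {
        void *regs = ioremap_nocache(0xfebf0000, 0x1000); /* fake BAR */
        unsigned int id;

        if (regs == NULL)
            return -ENOMEM;
        id = readl(regs);                /* 32-bit register read */
        writel(0x1, regs + 4);           /* hypothetical enable register */
        iounmap(regs);
        return (id != 0xffffffff) ? 0 : -ENODEV;
    }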
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/irq.h b/xenolinux-2.4.26-sparse/include/asm-xen/irq.h
deleted file mode 100644
index 668195e9ca..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/irq.h
+++ /dev/null
@@ -1,59 +0,0 @@
-#ifndef _ASM_IRQ_H
-#define _ASM_IRQ_H
-
-/*
- * linux/include/asm/irq.h
- *
- * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
- *
- * IRQ/IPI changes taken from work by Thomas Radke
- * <tomsoft@informatik.tu-chemnitz.de>
- */
-
-#include <linux/config.h>
-#include <asm/hypervisor.h>
-#include <asm/ptrace.h>
-
-/*
- * The flat IRQ space is divided into two regions:
- * 1. A one-to-one mapping of real physical IRQs. This space is only used
- * if we have physical device-access privilege. This region is at the
- * start of the IRQ space so that existing device drivers do not need
- * to be modified to translate physical IRQ numbers into our IRQ space.
- * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
- * are bound using the provided bind/unbind functions.
- */
-
-#define PIRQ_BASE 0
-#define NR_PIRQS 128
-
-#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
-#define NR_DYNIRQS 128
-
-#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
-
-#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
-#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
-
-#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
-#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
-
-/* Dynamic binding of event channels and VIRQ sources to Linux IRQ space. */
-extern int bind_virq_to_irq(int virq);
-extern void unbind_virq_from_irq(int virq);
-extern int bind_evtchn_to_irq(int evtchn);
-extern void unbind_evtchn_from_irq(int evtchn);
-
-static __inline__ int irq_cannonicalize(int irq)
-{
- return (irq == 2) ? 9 : irq;
-}
-
-extern void disable_irq(unsigned int);
-extern void disable_irq_nosync(unsigned int);
-extern void enable_irq(unsigned int);
-
-extern void irq_suspend(void);
-extern void irq_resume(void);
-
-#endif /* _ASM_IRQ_H */
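Once bound, a dynamic IRQ behaves like any other Linux IRQ; a sketch (the VIRQ
number and handler are placeholders, and request_irq() is assumed to be the
stock 2.4 interface):

    static void example_virq_handler(int irq, void *dev_id,
                                     struct pt_regs *regs)
    {
        /* ... handle the Xen-sourced virtual IRQ ... */
    }

    static int example_bind(void)
    {
        int irq = bind_virq_to_irq(VIRQ_DEBUG);   /* hypothetical VIRQ */

        return request_irq(irq, example_virq_handler, SA_INTERRUPT,
                           "example", NULL);
    }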
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/keyboard.h b/xenolinux-2.4.26-sparse/include/asm-xen/keyboard.h
deleted file mode 100644
index 9066a3bada..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/keyboard.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/* Portions copyright (c) 2003 James Scott, Intel Research Cambridge */
-/*
- * Talks to hypervisor to get PS/2 keyboard and mouse events, and send keyboard
- * and mouse commands
- */
-
-/* Based on:
- * linux/include/asm-i386/keyboard.h
- *
- * Created 3 Nov 1996 by Geert Uytterhoeven
- */
-
-#ifndef _XEN_KEYBOARD_H
-#define _XEN_KEYBOARD_H
-
-#ifdef __KERNEL__
-
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/kd.h>
-#include <linux/pm.h>
-#include <asm/io.h>
-
-extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
-extern int pckbd_getkeycode(unsigned int scancode);
-extern int pckbd_translate(unsigned char scancode, unsigned char *keycode,
- char raw_mode);
-extern char pckbd_unexpected_up(unsigned char keycode);
-extern void pckbd_leds(unsigned char leds);
-extern void pckbd_init_hw(void);
-extern int pckbd_pm_resume(struct pm_dev *, pm_request_t, void *);
-
-extern pm_callback pm_kbd_request_override;
-extern unsigned char pckbd_sysrq_xlate[128];
-
-#define kbd_setkeycode pckbd_setkeycode
-#define kbd_getkeycode pckbd_getkeycode
-#define kbd_translate pckbd_translate
-#define kbd_unexpected_up pckbd_unexpected_up
-#define kbd_leds pckbd_leds
-#define kbd_init_hw pckbd_init_hw
-#define kbd_sysrq_xlate pckbd_sysrq_xlate
-
-#define SYSRQ_KEY 0x54
-
-
-/* THIS SECTION TALKS TO XEN TO DO PS2 SUPPORT */
-#include <asm/hypervisor-ifs/kbd.h>
-#include <asm/hypervisor-ifs/hypervisor-if.h>
-
-#define kbd_controller_present xen_kbd_controller_present
-
-static inline int xen_kbd_controller_present ()
-{
- return start_info.flags & SIF_INITDOMAIN;
-}
-
-/* resource allocation */
-#define kbd_request_region() \
- do { } while (0)
-#define kbd_request_irq(handler) \
- do { \
- int irq = bind_virq_to_irq(VIRQ_PS2); \
- request_irq(irq, handler, 0, "ps/2", NULL); \
- } while ( 0 )
-
-/* could implement these with command to xen to filter mouse stuff... */
-#define aux_request_irq(hand, dev_id) 0
-#define aux_free_irq(dev_id) do { } while(0)
-
-/* Some stoneage hardware needs delays after some operations. */
-#define kbd_pause() do { } while(0)
-
-static unsigned char kbd_current_scancode = 0;
-
-static unsigned char kbd_read_input(void)
-{
- return kbd_current_scancode;
-}
-
-static unsigned char kbd_read_status(void)
-{
- long res;
- res = HYPERVISOR_kbd_op(KBD_OP_READ,0);
- if ( res<0 )
- {
- kbd_current_scancode = 0;
- return 0; /* error with our request - wrong domain? */
- }
- kbd_current_scancode = KBD_CODE_SCANCODE(res);
- return KBD_CODE_STATUS(res);
-}
-
-
-#define kbd_write_output(val) HYPERVISOR_kbd_op(KBD_OP_WRITEOUTPUT, val);
-#define kbd_write_command(val) HYPERVISOR_kbd_op(KBD_OP_WRITECOMMAND, val);
-
-
-#endif /* __KERNEL__ */
-#endif /* _XEN_KEYBOARD_H */
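The poll sequence the two helpers above imply -- status first (which latches the
scancode), then the scancode itself -- in sketch form (the function name and the
zero-means-no-event convention are assumptions):

    /* One poll of the virtual PS/2 controller; returns <0 if no event. */
    static int example_kbd_poll(unsigned char *scancode)
    {
        unsigned char status = kbd_read_status();

        if (status == 0)
            return -1;                    /* nothing pending, or wrong domain */
        *scancode = kbd_read_input();     /* latched by kbd_read_status() */
        return 0;
    }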
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/mmu_context.h b/xenolinux-2.4.26-sparse/include/asm-xen/mmu_context.h
deleted file mode 100644
index 7972ce7d74..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/mmu_context.h
+++ /dev/null
@@ -1,74 +0,0 @@
-#ifndef __I386_MMU_CONTEXT_H
-#define __I386_MMU_CONTEXT_H
-
-#include <linux/config.h>
-#include <asm/desc.h>
-#include <asm/atomic.h>
-#include <asm/pgalloc.h>
-
-/*
- * hooks to add arch specific data into the mm struct.
- * Note that destroy_context is called even if init_new_context
- * fails.
- */
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void destroy_context(struct mm_struct *mm);
-
-#ifdef CONFIG_SMP
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
-{
- if(cpu_tlbstate[cpu].state == TLBSTATE_OK)
- cpu_tlbstate[cpu].state = TLBSTATE_LAZY;
-}
-#else
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
-{
-}
-#endif
-
-extern pgd_t *cur_pgd;
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
-{
- if (prev != next) {
- /* stop flush ipis for the previous mm */
- clear_bit(cpu, &prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
- cpu_tlbstate[cpu].state = TLBSTATE_OK;
- cpu_tlbstate[cpu].active_mm = next;
-#endif
-
- /* Re-load page tables */
- cur_pgd = next->pgd;
- queue_pt_switch(__pa(cur_pgd));
- /* load_LDT, if either the previous or next thread
- * has a non-default LDT.
- */
- if (next->context.size+prev->context.size)
- load_LDT(&next->context);
- }
-#ifdef CONFIG_SMP
- else {
- cpu_tlbstate[cpu].state = TLBSTATE_OK;
- if(cpu_tlbstate[cpu].active_mm != next)
- out_of_line_bug();
- if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
- /* We were in lazy tlb mode and leave_mm disabled
- * tlb flush IPI delivery. We must reload %cr3.
- */
- cur_pgd = next->pgd;
- queue_pt_switch(__pa(cur_pgd));
- load_LDT(&next->context);
- }
- }
-#endif
-}
-
-#define activate_mm(prev, next) \
-do { \
- switch_mm((prev),(next),NULL,smp_processor_id()); \
- flush_page_update_queue(); \
-} while ( 0 )
-
-#endif
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/msr.h b/xenolinux-2.4.26-sparse/include/asm-xen/msr.h
deleted file mode 100644
index 1a2c8765a8..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/msr.h
+++ /dev/null
@@ -1,138 +0,0 @@
-#ifndef __ASM_MSR_H
-#define __ASM_MSR_H
-
-/*
- * Access to machine-specific registers (available on 586 and better only)
- * Note: the rd* operations modify the parameters directly (without using
- * pointer indirection), this allows gcc to optimize better
- */
-
-#define rdmsr(msr,val1,val2) \
-{ \
- dom0_op_t op; \
- op.cmd = DOM0_MSR; \
- op.u.msr.write = 0; \
- op.u.msr.msr = msr; \
- op.u.msr.cpu_mask = (1 << current->processor); \
- HYPERVISOR_dom0_op(&op); \
- val1 = op.u.msr.out1; \
- val2 = op.u.msr.out2; \
-}
-
-#define wrmsr(msr,val1,val2) \
-{ \
- dom0_op_t op; \
- op.cmd = DOM0_MSR; \
- op.u.msr.write = 1; \
- op.u.msr.cpu_mask = (1 << current->processor); \
- op.u.msr.msr = msr; \
- op.u.msr.in1 = val1; \
- op.u.msr.in2 = val2; \
- HYPERVISOR_dom0_op(&op); \
-}
-
-#define rdtsc(low,high) \
- __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
-
-#define rdtscl(low) \
- __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
-
-#define rdtscll(val) \
- __asm__ __volatile__("rdtsc" : "=A" (val))
-
-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
-
-#define rdpmc(counter,low,high) \
- __asm__ __volatile__("rdpmc" \
- : "=a" (low), "=d" (high) \
- : "c" (counter))
-
-/* symbolic names for some interesting MSRs */
-/* Intel defined MSRs. */
-#define MSR_IA32_P5_MC_ADDR 0
-#define MSR_IA32_P5_MC_TYPE 1
-#define MSR_IA32_PLATFORM_ID 0x17
-#define MSR_IA32_EBL_CR_POWERON 0x2a
-
-#define MSR_IA32_APICBASE 0x1b
-#define MSR_IA32_APICBASE_BSP (1<<8)
-#define MSR_IA32_APICBASE_ENABLE (1<<11)
-#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
-
-#define MSR_IA32_UCODE_WRITE 0x79
-#define MSR_IA32_UCODE_REV 0x8b
-
-#define MSR_IA32_BBL_CR_CTL 0x119
-
-#define MSR_IA32_MCG_CAP 0x179
-#define MSR_IA32_MCG_STATUS 0x17a
-#define MSR_IA32_MCG_CTL 0x17b
-
-#define MSR_IA32_THERM_CONTROL 0x19a
-#define MSR_IA32_THERM_INTERRUPT 0x19b
-#define MSR_IA32_THERM_STATUS 0x19c
-#define MSR_IA32_MISC_ENABLE 0x1a0
-
-#define MSR_IA32_DEBUGCTLMSR 0x1d9
-#define MSR_IA32_LASTBRANCHFROMIP 0x1db
-#define MSR_IA32_LASTBRANCHTOIP 0x1dc
-#define MSR_IA32_LASTINTFROMIP 0x1dd
-#define MSR_IA32_LASTINTTOIP 0x1de
-
-#define MSR_IA32_MC0_CTL 0x400
-#define MSR_IA32_MC0_STATUS 0x401
-#define MSR_IA32_MC0_ADDR 0x402
-#define MSR_IA32_MC0_MISC 0x403
-
-#define MSR_P6_PERFCTR0 0xc1
-#define MSR_P6_PERFCTR1 0xc2
-#define MSR_P6_EVNTSEL0 0x186
-#define MSR_P6_EVNTSEL1 0x187
-
-#define MSR_IA32_PERF_STATUS 0x198
-#define MSR_IA32_PERF_CTL 0x199
-
-/* AMD Defined MSRs */
-#define MSR_K6_EFER 0xC0000080
-#define MSR_K6_STAR 0xC0000081
-#define MSR_K6_WHCR 0xC0000082
-#define MSR_K6_UWCCR 0xC0000085
-#define MSR_K6_EPMR 0xC0000086
-#define MSR_K6_PSOR 0xC0000087
-#define MSR_K6_PFIR 0xC0000088
-
-#define MSR_K7_EVNTSEL0 0xC0010000
-#define MSR_K7_PERFCTR0 0xC0010004
-#define MSR_K7_HWCR 0xC0010015
-#define MSR_K7_CLK_CTL 0xC001001b
-#define MSR_K7_FID_VID_CTL 0xC0010041
-#define MSR_K7_VID_STATUS 0xC0010042
-
-/* Centaur-Hauls/IDT defined MSRs. */
-#define MSR_IDT_FCR1 0x107
-#define MSR_IDT_FCR2 0x108
-#define MSR_IDT_FCR3 0x109
-#define MSR_IDT_FCR4 0x10a
-
-#define MSR_IDT_MCR0 0x110
-#define MSR_IDT_MCR1 0x111
-#define MSR_IDT_MCR2 0x112
-#define MSR_IDT_MCR3 0x113
-#define MSR_IDT_MCR4 0x114
-#define MSR_IDT_MCR5 0x115
-#define MSR_IDT_MCR6 0x116
-#define MSR_IDT_MCR7 0x117
-#define MSR_IDT_MCR_CTRL 0x120
-
-/* VIA Cyrix defined MSRs*/
-#define MSR_VIA_FCR 0x1107
-#define MSR_VIA_LONGHAUL 0x110a
-#define MSR_VIA_BCR2 0x1147
-
-/* Transmeta defined MSRs */
-#define MSR_TMTA_LONGRUN_CTRL 0x80868010
-#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
-#define MSR_TMTA_LRTI_READOUT 0x80868018
-#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
-
-#endif /* __ASM_MSR_H */
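
While rdmsr/wrmsr above must round-trip through a DOM0_MSR hypercall, the TSC reads stay as raw instructions. A runnable sketch using the same rdtsc encoding (assumes gcc-style inline asm on an x86 CPU with a TSC; the two-register form works on both 32- and 64-bit, unlike the "=A" constraint used by rdtscll):

    /* Hedged sketch: reading the TSC with the same "rdtsc" encoding
     * the header uses.  Assumes gcc inline asm on x86. */
    #include <stdio.h>

    static inline unsigned long long read_tsc(void)
    {
        unsigned int lo, hi;
        __asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
        return ((unsigned long long)hi << 32) | lo;
    }

    int main(void)
    {
        unsigned long long t0 = read_tsc();
        unsigned long long t1 = read_tsc();
        printf("back-to-back rdtsc delta: %llu cycles\n", t1 - t0);
        return 0;
    }
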
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/multicall.h b/xenolinux-2.4.26-sparse/include/asm-xen/multicall.h
deleted file mode 100644
index f0ea5c3a66..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/multicall.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/******************************************************************************
- * multicall.h
- */
-
-#ifndef __MULTICALL_H__
-#define __MULTICALL_H__
-
-#include <asm/hypervisor.h>
-
-extern multicall_entry_t multicall_list[];
-extern int nr_multicall_ents;
-
-static inline void queue_multicall0(unsigned long op)
-{
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall1(unsigned long op, unsigned long arg1)
-{
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall2(
- unsigned long op, unsigned long arg1, unsigned long arg2)
-{
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- multicall_list[i].args[1] = arg2;
- nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall3(
- unsigned long op, unsigned long arg1, unsigned long arg2,
- unsigned long arg3)
-{
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- multicall_list[i].args[1] = arg2;
- multicall_list[i].args[2] = arg3;
- nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall4(
- unsigned long op, unsigned long arg1, unsigned long arg2,
- unsigned long arg3, unsigned long arg4)
-{
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- multicall_list[i].args[1] = arg2;
- multicall_list[i].args[2] = arg3;
- multicall_list[i].args[3] = arg4;
- nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall5(
- unsigned long op, unsigned long arg1, unsigned long arg2,
- unsigned long arg3, unsigned long arg4, unsigned long arg5)
-{
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- multicall_list[i].args[1] = arg2;
- multicall_list[i].args[2] = arg3;
- multicall_list[i].args[3] = arg4;
- multicall_list[i].args[4] = arg5;
- nr_multicall_ents = i+1;
-}
-
-static inline void execute_multicall_list(void)
-{
- if ( unlikely(nr_multicall_ents == 0) ) return;
- (void)HYPERVISOR_multicall(multicall_list, nr_multicall_ents);
- nr_multicall_ents = 0;
-}
-
-#endif /* __MULTICALL_H__ */
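
The queue_multicallN() helpers fill multicall_list with no overflow check, so callers are expected to invoke execute_multicall_list() before the array fills. A user-space simulation of the same pattern, with made-up sizes and a printf loop standing in for the single HYPERVISOR_multicall():

    /* Sketch of the multicall batching pattern, simulated in user
     * space.  multicall_entry_t, NR_ENTRIES and the "hypercall" loop
     * are stand-ins for the real kernel-side list. */
    #include <stdio.h>

    typedef struct { unsigned long op, args[5]; } multicall_entry_t;

    #define NR_ENTRIES 8
    static multicall_entry_t multicall_list[NR_ENTRIES];
    static int nr_multicall_ents;

    static void queue_multicall2(unsigned long op,
                                 unsigned long a1, unsigned long a2)
    {
        int i = nr_multicall_ents;
        multicall_list[i].op = op;
        multicall_list[i].args[0] = a1;
        multicall_list[i].args[1] = a2;
        nr_multicall_ents = i + 1;
    }

    static void execute_multicall_list(void)
    {
        int i;
        for (i = 0; i < nr_multicall_ents; i++)  /* one "hypercall" */
            printf("op=%lu(%lu,%lu)\n", multicall_list[i].op,
                   multicall_list[i].args[0], multicall_list[i].args[1]);
        nr_multicall_ents = 0;
    }

    int main(void)
    {
        queue_multicall2(1, 10, 20);
        queue_multicall2(2, 30, 40);
        execute_multicall_list();
        return 0;
    }
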
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/page.h b/xenolinux-2.4.26-sparse/include/asm-xen/page.h
deleted file mode 100644
index b7640a7d78..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/page.h
+++ /dev/null
@@ -1,173 +0,0 @@
-#ifndef _I386_PAGE_H
-#define _I386_PAGE_H
-
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-#include <linux/config.h>
-#include <asm/hypervisor-ifs/hypervisor-if.h>
-
-#ifdef CONFIG_X86_USE_3DNOW
-
-#include <asm/mmx.h>
-
-#define clear_page(page) mmx_clear_page((void *)(page))
-#define copy_page(to,from) mmx_copy_page(to,from)
-
-#else
-
-/*
- * On older x86 processors it's not a win to use MMX here, it seems.
- * Maybe the K6-III?
- */
-
-#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
-#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
-
-#endif
-
-#define clear_user_page(page, vaddr) clear_page(page)
-#define copy_user_page(to, from, vaddr) copy_page(to, from)
-
-/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
-extern unsigned long *phys_to_machine_mapping;
-#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
-#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
-static inline unsigned long phys_to_machine(unsigned long phys)
-{
- unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
- machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
- return machine;
-}
-static inline unsigned long machine_to_phys(unsigned long machine)
-{
- unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
- phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
- return phys;
-}
-
-/*
- * These are used to make use of C type-checking..
- */
-#if CONFIG_X86_PAE
-typedef struct { unsigned long pte_low, pte_high; } pte_t;
-typedef struct { unsigned long long pmd; } pmd_t;
-typedef struct { unsigned long long pgd; } pgd_t;
-#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
-#else
-typedef struct { unsigned long pte_low; } pte_t;
-typedef struct { unsigned long pmd; } pmd_t;
-typedef struct { unsigned long pgd; } pgd_t;
-static inline unsigned long pte_val(pte_t x)
-{
- unsigned long ret = x.pte_low;
- if ( (ret & 1) ) ret = machine_to_phys(ret);
- return ret;
-}
-#endif
-#define PTE_MASK PAGE_MASK
-
-typedef struct { unsigned long pgprot; } pgprot_t;
-
-static inline unsigned long pmd_val(pmd_t x)
-{
- unsigned long ret = x.pmd;
- if ( (ret & 1) ) ret = machine_to_phys(ret);
- return ret;
-}
-#define pgd_val(x) ({ BUG(); (unsigned long)0; })
-#define pgprot_val(x) ((x).pgprot)
-
-static inline pte_t __pte(unsigned long x)
-{
- if ( (x & 1) ) x = phys_to_machine(x);
- return ((pte_t) { (x) });
-}
-static inline pmd_t __pmd(unsigned long x)
-{
- if ( (x & 1) ) x = phys_to_machine(x);
- return ((pmd_t) { (x) });
-}
-#define __pgd(x) ({ BUG(); (pgd_t) { 0 }; })
-#define __pgprot(x) ((pgprot_t) { (x) } )
-
-#endif /* !__ASSEMBLY__ */
-
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
-/*
- * This handles the memory map.. We could make this a config
- * option, but too many people screw it up, and too few need
- * it.
- *
- * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
- * a virtual address space of one gigabyte, which limits the
- * amount of physical memory you can use to about 950MB.
- *
- * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
- * and CONFIG_HIGHMEM64G options in the kernel configuration.
- */
-
-#define __PAGE_OFFSET (0xC0000000)
-
-#ifndef __ASSEMBLY__
-
-/*
- * Tell the user there is some problem. Beep too, so we can
- * see^H^H^Hhear bugs in early bootup as well!
- * The offending file and line are encoded after the "officially
- * undefined" opcode for parsing in the trap handler.
- */
-
-#if 1 /* Set to zero for a slightly smaller kernel */
-#define BUG() \
- __asm__ __volatile__( "ud2\n" \
- "\t.word %c0\n" \
- "\t.long %c1\n" \
- : : "i" (__LINE__), "i" (__FILE__))
-#else
-#define BUG() __asm__ __volatile__("ud2\n")
-#endif
-
-#define PAGE_BUG(page) do { \
- BUG(); \
-} while (0)
-
-/* Pure 2^n version of get_order */
-static __inline__ int get_order(unsigned long size)
-{
- int order;
-
- size = (size-1) >> (PAGE_SHIFT-1);
- order = -1;
- do {
- size >>= 1;
- order++;
- } while (size);
- return order;
-}
-
-#endif /* __ASSEMBLY__ */
-
-#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
-#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
-#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
-#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
-
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-/* VIRT <-> MACHINE conversion */
-#define virt_to_machine(_a) (phys_to_machine(__pa(_a)))
-#define machine_to_virt(_m) (__va(machine_to_phys(_m)))
-
-#endif /* __KERNEL__ */
-
-#endif /* _I386_PAGE_H */
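
The machine<->physical macros are the heart of the paravirtualised memory layout: the guest's contiguous pseudo-physical frame numbers are translated to the machine frames Xen actually handed out, and back again. A self-contained sketch of the same round trip, with tiny made-up tables in place of phys_to_machine_mapping and machine_to_phys_mapping:

    /* Sketch of the pfn<->mfn translation in phys_to_machine()/
     * machine_to_phys(), using small invented translation tables. */
    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

    static unsigned long pfn_to_mfn_tab[4] = { 7, 3, 9, 1 };
    static unsigned long mfn_to_pfn_tab[16];

    static unsigned long phys_to_machine(unsigned long phys)
    {
        unsigned long m = pfn_to_mfn_tab[phys >> PAGE_SHIFT];
        return (m << PAGE_SHIFT) | (phys & ~PAGE_MASK);
    }

    static unsigned long machine_to_phys(unsigned long machine)
    {
        unsigned long p = mfn_to_pfn_tab[machine >> PAGE_SHIFT];
        return (p << PAGE_SHIFT) | (machine & ~PAGE_MASK);
    }

    int main(void)
    {
        unsigned long pfn, pa = (2UL << PAGE_SHIFT) | 0x123;
        for (pfn = 0; pfn < 4; pfn++)       /* build the inverse map */
            mfn_to_pfn_tab[pfn_to_mfn_tab[pfn]] = pfn;
        assert(machine_to_phys(phys_to_machine(pa)) == pa);
        printf("pa %#lx -> ma %#lx\n", pa, phys_to_machine(pa));
        return 0;
    }

Note how the page offset bits pass through untranslated; only the frame number is remapped, which is why __pte()/pte_val() above only translate entries with the present bit set.
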
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/pci.h b/xenolinux-2.4.26-sparse/include/asm-xen/pci.h
deleted file mode 100644
index 74ae5ba8b1..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/pci.h
+++ /dev/null
@@ -1,283 +0,0 @@
-#ifndef __i386_PCI_H
-#define __i386_PCI_H
-
-#include <linux/config.h>
-
-#ifdef __KERNEL__
-
-/* Can be used to override the logic in pci_scan_bus for skipping
- already-configured bus numbers - to be used for buggy BIOSes
- or architectures with incomplete PCI setup by the loader */
-
-#ifdef CONFIG_PCI
-extern unsigned int pcibios_assign_all_busses(void);
-#else
-#define pcibios_assign_all_busses() 0
-#endif
-#define pcibios_scan_all_fns() 0
-
-extern unsigned long pci_mem_start;
-#define PCIBIOS_MIN_IO 0x1000
-#define PCIBIOS_MIN_MEM (pci_mem_start)
-
-void pcibios_config_init(void);
-struct pci_bus * pcibios_scan_root(int bus);
-extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
-extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
-
-void pcibios_set_master(struct pci_dev *dev);
-void pcibios_penalize_isa_irq(int irq);
-struct irq_routing_table *pcibios_get_irq_routing_table(void);
-int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
-
-/* Dynamic DMA mapping stuff.
- * i386 has everything mapped statically.
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/scatterlist.h>
-#include <linux/string.h>
-#include <asm/io.h>
-
-struct pci_dev;
-
-/* The networking and block device layers use this boolean for bounce
- * buffer decisions.
- */
-#define PCI_DMA_BUS_IS_PHYS (0)
-
-/* Allocate and map kernel buffer using consistent mode DMA for a device.
- * hwdev should be valid struct pci_dev pointer for PCI devices,
- * NULL for PCI-like buses (ISA, EISA).
- * Returns a non-NULL cpu-view pointer to the buffer if successful and
- * sets *dma_handle to the pci side dma address as well, else *dma_handle
- * is undefined.
- */
-extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle);
-
-/* Free and unmap a consistent DMA buffer.
- * cpu_addr is what was returned from pci_alloc_consistent,
- * size must be the same as what was passed into pci_alloc_consistent,
- * and likewise dma_addr must be the same as what *dma_handle was set to.
- *
- * References to the memory and mappings associated with cpu_addr/dma_addr
- * past this call are illegal.
- */
-extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single is performed.
- */
-static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
- size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- flush_write_buffers();
- return virt_to_bus(ptr);
-}
-
-/* Unmap a single streaming mode DMA translation. The dma_addr and size
- * must match what was provided for in a previous pci_map_single call. All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- /* Nothing to do */
-}
-
-/*
- * pci_{map,unmap}_page map a kernel page to a dma_addr_t. Identical
- * to pci_map_single, but takes a struct page instead of a virtual address.
- */
-static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
- unsigned long offset, size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
-
- return page_to_bus(page) + offset;
-}
-
-static inline void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
- size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- /* Nothing to do */
-}
-
-/* pci_unmap_{page,single} is a nop so... */
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-#define pci_unmap_addr(PTR, ADDR_NAME) (0)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
-#define pci_unmap_len(PTR, LEN_NAME) (0)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
-
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scatter-gather version of the
- * above pci_map_single interface. Here the scatter-gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- int i;
-
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
-
- /*
- * temporary 2.4 hack
- */
- for (i = 0; i < nents; i++ ) {
- if (sg[i].address && sg[i].page)
- out_of_line_bug();
- else if (!sg[i].address && !sg[i].page)
- out_of_line_bug();
-
- if (sg[i].address)
- sg[i].dma_address = virt_to_bus(sg[i].address);
- else
- sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset;
- }
-
- flush_write_buffers();
- return nents;
-}
-
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- /* Nothing to do */
-}
-
-/* Make physical memory consistent for a single
- * streaming mode DMA translation after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to tear down the PCI dma
- * mapping, you must call this function before doing so. At the
- * next point you give the PCI dma address back to the card, the
- * device again owns the buffer.
- */
-static inline void pci_dma_sync_single(struct pci_dev *hwdev,
- dma_addr_t dma_handle,
- size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- flush_write_buffers();
-}
-
-/* Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as pci_dma_sync_single but for a scatter-gather list,
- * same rules and usage.
- */
-static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
- struct scatterlist *sg,
- int nelems, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- flush_write_buffers();
-}
-
-/* Return whether the given PCI device DMA address mask can
- * be supported properly. For example, if your device can
- * only drive the low 24-bits during PCI bus mastering, then
- * you would pass 0x00ffffff as the mask to this function.
- */
-static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if(mask < 0x00ffffff)
- return 0;
-
- return 1;
-}
-
-/* This is always fine. */
-#define pci_dac_dma_supported(pci_dev, mask) (1)
-
-static __inline__ dma64_addr_t
-pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
-{
- return ((dma64_addr_t) page_to_bus(page) +
- (dma64_addr_t) offset);
-}
-
-static __inline__ struct page *
-pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
-{
- return bus_to_page(dma_addr);
-}
-
-static __inline__ unsigned long
-pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
-{
- return (dma_addr & ~PAGE_MASK);
-}
-
-static __inline__ void
-pci_dac_dma_sync_single(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-{
- flush_write_buffers();
-}
-
-/* These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-#define sg_dma_len(sg) ((sg)->length)
-
-/* Return the index of the PCI controller for device. */
-static inline int pci_controller_num(struct pci_dev *dev)
-{
- return 0;
-}
-
-#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
-
-#endif /* __KERNEL__ */
-
-#endif /* __i386_PCI_H */
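
Since i386 DMA is identity-mapped, pci_map_sg() above only has to pick a bus address per entry, either from a kernel virtual address or from a page plus offset (the "temporary 2.4 hack"). A user-space sketch of that selection logic, with a trivial stand-in for virt_to_bus() and a simplified scatterlist:

    /* Sketch of the per-entry bus-address selection in pci_map_sg().
     * struct scatterlist is simplified (page_bus replaces the real
     * struct page pointer) and virt_to_bus() is a trivial stand-in. */
    #include <stdio.h>
    #include <stdint.h>

    struct scatterlist {
        void *address;              /* virtual address, or NULL */
        unsigned long page_bus;     /* bus address of backing page */
        unsigned int offset;
        unsigned long dma_address;
    };

    static unsigned long virt_to_bus(void *v)
    {
        return (unsigned long)(uintptr_t)v;   /* identity mapping */
    }

    static int map_sg(struct scatterlist *sg, int nents)
    {
        int i;
        for (i = 0; i < nents; i++) {
            if (sg[i].address)      /* exactly one source must be set */
                sg[i].dma_address = virt_to_bus(sg[i].address);
            else
                sg[i].dma_address = sg[i].page_bus + sg[i].offset;
        }
        return nents;               /* i386 never coalesces entries */
    }

    int main(void)
    {
        char buf[64];
        struct scatterlist sg[2] = {
            { buf,  0,       0,    0 },
            { NULL, 0x10000, 0x80, 0 },
        };
        map_sg(sg, 2);
        printf("dma0=%#lx dma1=%#lx\n",
               sg[0].dma_address, sg[1].dma_address);
        return 0;
    }
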
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/pgalloc.h b/xenolinux-2.4.26-sparse/include/asm-xen/pgalloc.h
deleted file mode 100644
index 143beeeef5..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/pgalloc.h
+++ /dev/null
@@ -1,286 +0,0 @@
-#ifndef _I386_PGALLOC_H
-#define _I386_PGALLOC_H
-
-#include <linux/config.h>
-#include <asm/processor.h>
-#include <asm/fixmap.h>
-#include <asm/hypervisor.h>
-#include <linux/threads.h>
-
-/*
- * Quick lists are aligned so that least significant bits of array pointer
- * are all zero when list is empty, and all one when list is full.
- */
-#define QUICKLIST_ENTRIES 256
-#define QUICKLIST_EMPTY(_l) !((unsigned long)(_l) & ((QUICKLIST_ENTRIES*4)-1))
-#define QUICKLIST_FULL(_l) QUICKLIST_EMPTY((_l)+1)
-#define pgd_quicklist (current_cpu_data.pgd_quick)
-#define pmd_quicklist (current_cpu_data.pmd_quick)
-#define pte_quicklist (current_cpu_data.pte_quick)
-#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
-
-#define pmd_populate(mm, pmd, pte) \
- do { \
- set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \
- XEN_flush_page_update_queue(); \
- } while ( 0 )
-
-/*
- * Allocate and free page tables.
- */
-
-#if defined (CONFIG_X86_PAE)
-
-#error "no PAE support as yet"
-
-/*
- * We can't include <linux/slab.h> here, thus these uglinesses.
- */
-struct kmem_cache_s;
-
-extern struct kmem_cache_s *pae_pgd_cachep;
-extern void *kmem_cache_alloc(struct kmem_cache_s *, int);
-extern void kmem_cache_free(struct kmem_cache_s *, void *);
-
-
-static inline pgd_t *get_pgd_slow(void)
-{
- int i;
- pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, GFP_KERNEL);
-
- if (pgd) {
- for (i = 0; i < USER_PTRS_PER_PGD; i++) {
- unsigned long pmd = __get_free_page(GFP_KERNEL);
- if (!pmd)
- goto out_oom;
- clear_page(pmd);
- set_pgd(pgd + i, __pgd(1 + __pa(pmd)));
- }
- memcpy(pgd + USER_PTRS_PER_PGD,
- swapper_pg_dir + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- }
- return pgd;
-out_oom:
- for (i--; i >= 0; i--)
- free_page((unsigned long)__va(pgd_val(pgd[i])-1));
- kmem_cache_free(pae_pgd_cachep, pgd);
- return NULL;
-}
-
-#else
-
-static inline pgd_t *get_pgd_slow(void)
-{
- pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
-
- if (pgd) {
- memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
- memcpy(pgd + USER_PTRS_PER_PGD,
- init_mm.pgd + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- __make_page_readonly(pgd);
- queue_pgd_pin(__pa(pgd));
-
- }
- return pgd;
-}
-
-#endif /* CONFIG_X86_PAE */
-
-static inline pgd_t *get_pgd_fast(void)
-{
- unsigned long ret;
-
- if ( !QUICKLIST_EMPTY(pgd_quicklist) ) {
- ret = *(--pgd_quicklist);
- pgtable_cache_size--;
-
- } else
- ret = (unsigned long)get_pgd_slow();
- return (pgd_t *)ret;
-}
-
-static inline void free_pgd_slow(pgd_t *pgd)
-{
-#if defined(CONFIG_X86_PAE)
-#error
- int i;
-
- for (i = 0; i < USER_PTRS_PER_PGD; i++)
- free_page((unsigned long)__va(pgd_val(pgd[i])-1));
- kmem_cache_free(pae_pgd_cachep, pgd);
-#else
- queue_pgd_unpin(__pa(pgd));
- __make_page_writeable(pgd);
- free_page((unsigned long)pgd);
-#endif
-}
-
-static inline void free_pgd_fast(pgd_t *pgd)
-{
- if ( !QUICKLIST_FULL(pgd_quicklist) ) {
- *(pgd_quicklist++) = (unsigned long)pgd;
- pgtable_cache_size++;
- } else
- free_pgd_slow(pgd);
-}
-
-static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
- pte_t *pte;
-
- pte = (pte_t *) __get_free_page(GFP_KERNEL);
- if (pte)
- {
- clear_page(pte);
- __make_page_readonly(pte);
- queue_pte_pin(__pa(pte));
- }
- return pte;
-
-}
-
-static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
- unsigned long address)
-{
- unsigned long ret = 0;
- if ( !QUICKLIST_EMPTY(pte_quicklist) ) {
- ret = *(--pte_quicklist);
- pgtable_cache_size--;
- }
- return (pte_t *)ret;
-}
-
-static __inline__ void pte_free_slow(pte_t *pte)
-{
- queue_pte_unpin(__pa(pte));
- __make_page_writeable(pte);
- free_page((unsigned long)pte);
-}
-
-static inline void pte_free_fast(pte_t *pte)
-{
- if ( !QUICKLIST_FULL(pte_quicklist) ) {
- *(pte_quicklist++) = (unsigned long)pte;
- pgtable_cache_size++;
- } else
- pte_free_slow(pte);
-}
-
-#define pte_free(pte) pte_free_fast(pte)
-#define pgd_free(pgd) free_pgd_fast(pgd)
-#define pgd_alloc(mm) get_pgd_fast()
-
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- * (In the PAE case we free the pmds as part of the pgd.)
- */
-
-#define pmd_alloc_one_fast(mm, addr) ({ BUG(); ((pmd_t *)1); })
-#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free_slow(x) do { } while (0)
-#define pmd_free_fast(x) do { } while (0)
-#define pmd_free(x) do { } while (0)
-#define pgd_populate(mm, pmd, pte) BUG()
-
-extern int do_check_pgt_cache(int, int);
-
-/*
- * TLB flushing:
- *
- * - flush_tlb() flushes the current mm struct TLBs
- * - flush_tlb_all() flushes all processes TLBs
- * - flush_tlb_mm(mm) flushes the specified mm context TLB's
- * - flush_tlb_page(vma, vmaddr) flushes one page
- * - flush_tlb_range(mm, start, end) flushes a range of pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
- *
- * ..but the i386 has somewhat limited tlb flushing capabilities,
- * and page-granular flushes are available only on i486 and up.
- */
-
-#ifndef CONFIG_SMP
-
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb_all()
-#define local_flush_tlb() __flush_tlb()
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
- if (mm == current->active_mm) queue_tlb_flush();
- XEN_flush_page_update_queue();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr)
-{
- if (vma->vm_mm == current->active_mm) queue_invlpg(addr);
- XEN_flush_page_update_queue();
-}
-
-static inline void flush_tlb_range(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- if (mm == current->active_mm) queue_tlb_flush();
- XEN_flush_page_update_queue();
-}
-
-#else
-#error no guestos SMP support yet...
-#include <asm/smp.h>
-
-#define local_flush_tlb() \
- __flush_tlb()
-
-extern void flush_tlb_all(void);
-extern void flush_tlb_current_task(void);
-extern void flush_tlb_mm(struct mm_struct *);
-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-
-#define flush_tlb() flush_tlb_current_task()
-
-static inline void flush_tlb_range(struct mm_struct * mm, unsigned long start, unsigned long end)
-{
- flush_tlb_mm(mm);
-}
-
-#define TLBSTATE_OK 1
-#define TLBSTATE_LAZY 2
-
-struct tlb_state
-{
- struct mm_struct *active_mm;
- int state;
-} ____cacheline_aligned;
-extern struct tlb_state cpu_tlbstate[NR_CPUS];
-
-#endif /* CONFIG_SMP */
-
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- /* i386 does not keep any page table caches in TLB */
- XEN_flush_page_update_queue();
-}
-
-/*
- * NB. The 'domid' field should be zero if mapping I/O space (non-RAM).
- * Otherwise it identifies the owner of the memory that is being mapped.
- */
-extern int direct_remap_area_pages(struct mm_struct *mm,
- unsigned long address,
- unsigned long machine_addr,
- unsigned long size,
- pgprot_t prot,
- domid_t domid);
-
-extern int __direct_remap_area_pages(struct mm_struct *mm,
- unsigned long address,
- unsigned long size,
- mmu_update_t *v);
-
-
-
-#endif /* _I386_PGALLOC_H */
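
QUICKLIST_EMPTY/FULL above never store a count: because each quicklist is a naturally aligned 256-entry array of 32-bit words, the low bits of the fill pointer are zero exactly when it sits on an array boundary. One slot is sacrificed so that "full" and "empty" remain distinguishable. A runnable sketch of the trick (C11 aligned_alloc; unsigned int mimics the 4-byte entries):

    /* Sketch of the alignment trick behind QUICKLIST_EMPTY/FULL. */
    #include <stdio.h>
    #include <stdlib.h>

    #define ENTRIES 256
    #define BYTES   (ENTRIES * sizeof(unsigned int))

    #define QL_EMPTY(p) (!((unsigned long)(p) & (BYTES - 1)))
    #define QL_FULL(p)  QL_EMPTY((p) + 1)

    int main(void)
    {
        unsigned int *base = aligned_alloc(BYTES, BYTES);
        unsigned int *top = base;           /* fill pointer */
        long n = 0;
        if (!base)
            return 1;
        printf("empty at start? %d\n", QL_EMPTY(top));   /* 1 */
        while (!QL_FULL(top)) {             /* push until full */
            *top++ = 42;
            n++;
        }
        /* Capacity is 255, not 256: a completely full array would put
         * the pointer back on an aligned address and look empty. */
        printf("held %ld entries at full\n", n);
        free(base);
        return 0;
    }
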
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/pgtable-2level.h b/xenolinux-2.4.26-sparse/include/asm-xen/pgtable-2level.h
deleted file mode 100644
index 162ba1fbed..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/pgtable-2level.h
+++ /dev/null
@@ -1,72 +0,0 @@
-#ifndef _I386_PGTABLE_2LEVEL_H
-#define _I386_PGTABLE_2LEVEL_H
-
-/*
- * traditional i386 two-level paging structure:
- */
-
-#define PGDIR_SHIFT 22
-#define PTRS_PER_PGD 1024
-
-/*
- * the i386 is two-level, so we don't really have any
- * PMD directory physically.
- */
-#define PMD_SHIFT 22
-#define PTRS_PER_PMD 1
-
-#define PTRS_PER_PTE 1024
-
-#define pte_ERROR(e) \
- printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
-#define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
-#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-static inline int pgd_none(pgd_t pgd) { return 0; }
-static inline int pgd_bad(pgd_t pgd) { return 0; }
-static inline int pgd_present(pgd_t pgd) { return 1; }
-#define pgd_clear(xp) do { } while (0)
-
-#define set_pte(pteptr, pteval) queue_l1_entry_update(pteptr, (pteval).pte_low)
-#define set_pte_atomic(pteptr, pteval) queue_l1_entry_update(pteptr, (pteval).pte_low)
-#define set_pmd(pmdptr, pmdval) queue_l2_entry_update((pmdptr), (pmdval).pmd)
-#define set_pgd(pgdptr, pgdval) ((void)0)
-
-#define pgd_page(pgd) \
-((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
-
-static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
-{
- return (pmd_t *) dir;
-}
-
-#define pte_same(a, b) ((a).pte_low == (b).pte_low)
-#define pte_page(x) (mem_map+((unsigned long)((pte_val(x) >> PAGE_SHIFT))))
-#define pte_none(x) (!(x).pte_low)
-#define __mk_pte(page_nr,pgprot) __pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
-
-/*
- * A note on implementation of this atomic 'get-and-clear' operation.
- * This is actually very simple because XenoLinux can only run on a single
- * processor. Therefore, we cannot race other processors setting the 'accessed'
- * or 'dirty' bits on a page-table entry.
- * Even if pages are shared between domains, that is not a problem because
- * each domain will have separate page tables, with their own versions of
- * accessed & dirty state.
- */
-static inline pte_t ptep_get_and_clear(pte_t *xp)
-{
- pte_t pte = *xp;
- if ( !pte_none(pte) )
- queue_l1_entry_update(xp, 0);
- return pte;
-}
-
-#endif /* _I386_PGTABLE_2LEVEL_H */
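
With PGDIR_SHIFT at 22 and 1024 entries per table, a 32-bit virtual address splits 10/10/12 across the two levels and the page offset. A short, runnable sketch of the index arithmetic that pgd_index()/__pte_offset() in pgtable.h perform:

    /* Sketch of the two-level 10/10/12 address split implied by
     * PGDIR_SHIFT == 22 and PTRS_PER_PTE == 1024. */
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PGDIR_SHIFT 22

    int main(void)
    {
        unsigned long va = 0xC0123456UL;    /* some kernel address */
        unsigned long pgd = (va >> PGDIR_SHIFT) & 1023;
        unsigned long pte = (va >> PAGE_SHIFT) & 1023;
        unsigned long off = va & ((1UL << PAGE_SHIFT) - 1);
        printf("va %#lx -> pgd[%lu] pte[%lu] offset %#lx\n",
               va, pgd, pte, off);          /* pgd[768] pte[291] 0x456 */
        return 0;
    }
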
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/pgtable.h b/xenolinux-2.4.26-sparse/include/asm-xen/pgtable.h
deleted file mode 100644
index 38721e4cff..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/pgtable.h
+++ /dev/null
@@ -1,374 +0,0 @@
-#ifndef _I386_PGTABLE_H
-#define _I386_PGTABLE_H
-
-#include <linux/config.h>
-
-/*
- * The Linux memory management assumes a three-level page table setup. On
- * the i386, we use that, but "fold" the mid level into the top-level page
- * table, so that we physically have the same two-level page table as the
- * i386 mmu expects.
- *
- * This file contains the functions and defines necessary to modify and use
- * the i386 page table tree.
- */
-#ifndef __ASSEMBLY__
-#include <asm/processor.h>
-#include <asm/hypervisor.h>
-#include <linux/threads.h>
-#include <asm/fixmap.h>
-
-#ifndef _I386_BITOPS_H
-#include <asm/bitops.h>
-#endif
-
-#define swapper_pg_dir 0
-extern void paging_init(void);
-
-/* Caches aren't brain-dead on the intel. */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_range(mm, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr) do { } while (0)
-#define flush_page_to_ram(page) do { } while (0)
-#define flush_dcache_page(page) do { } while (0)
-#define flush_icache_range(start, end) do { } while (0)
-#define flush_icache_page(vma,pg) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
-
-extern unsigned long pgkern_mask;
-
-#define __flush_tlb() ({ queue_tlb_flush(); XEN_flush_page_update_queue(); })
-#define __flush_tlb_global() __flush_tlb()
-#define __flush_tlb_all() __flush_tlb_global()
-#define __flush_tlb_one(addr) ({ queue_invlpg(addr); XEN_flush_page_update_queue(); })
-#define __flush_tlb_single(addr) ({ queue_invlpg(addr); XEN_flush_page_update_queue(); })
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
-#endif /* !__ASSEMBLY__ */
-
-/*
- * The Linux x86 paging architecture is 'compile-time dual-mode', it
- * implements both the traditional 2-level x86 page tables and the
- * newer 3-level PAE-mode page tables.
- */
-#ifndef __ASSEMBLY__
-#if CONFIG_X86_PAE
-# include <asm/pgtable-3level.h>
-
-/*
- * Need to initialise the X86 PAE caches
- */
-extern void pgtable_cache_init(void);
-
-#else
-# include <asm/pgtable-2level.h>
-
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
-#endif
-#endif
-
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-
-#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_PGD_NR 0
-
-#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
-#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
-
-#define TWOLEVEL_PGDIR_SHIFT 22
-#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
-#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
-
-
-#ifndef __ASSEMBLY__
-/* 4MB is just a nice "safety zone". Also, we align to a fresh pde. */
-#define VMALLOC_OFFSET (4*1024*1024)
-extern void * high_memory;
-#define VMALLOC_START (((unsigned long) high_memory + 2*VMALLOC_OFFSET-1) & \
- ~(VMALLOC_OFFSET-1))
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
-#if CONFIG_HIGHMEM
-# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
-#else
-# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
-#endif
-
-#define _PAGE_BIT_PRESENT 0
-#define _PAGE_BIT_RW 1
-#define _PAGE_BIT_USER 2
-#define _PAGE_BIT_PWT 3
-#define _PAGE_BIT_PCD 4
-#define _PAGE_BIT_ACCESSED 5
-#define _PAGE_BIT_DIRTY 6
-#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */
-#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
-#define _PAGE_BIT_IO 9
-
-#define _PAGE_PRESENT 0x001
-#define _PAGE_RW 0x002
-#define _PAGE_USER 0x004
-#define _PAGE_PWT 0x008
-#define _PAGE_PCD 0x010
-#define _PAGE_ACCESSED 0x020
-#define _PAGE_DIRTY 0x040
-#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */
-#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */
-#define _PAGE_IO 0x200
-
-#define _PAGE_PROTNONE 0x080 /* If not present */
-
-#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-
-#define __PAGE_KERNEL \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define __PAGE_KERNEL_NOCACHE \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)
-#define __PAGE_KERNEL_RO \
- (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
-
-#if 0
-#define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)
-#else
-#define MAKE_GLOBAL(x) __pgprot(x)
-#endif
-
-#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
-
-/*
- * The i386 can't do page protection for execute; it treats execute
- * permission the same as read. Also, write permissions imply read
- * permissions. This is the closest we can get..
- */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
-
-#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(xp) queue_l1_entry_update(xp, 0)
-
-#define pmd_none(x) (!(x).pmd)
-#define pmd_present(x) ((x).pmd & _PAGE_PRESENT)
-#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
-#define pmd_bad(x) (((x).pmd & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-
-
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
-static inline int pte_exec(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
-static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
-static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
-static inline int pte_io(pte_t pte) { return (pte).pte_low & _PAGE_IO; }
-
-static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
-static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
-static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
-static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
-static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkio(pte_t pte) { (pte).pte_low |= _PAGE_IO; return pte; }
-
-static inline int ptep_test_and_clear_dirty(pte_t *ptep)
-{
- unsigned long pteval = *(unsigned long *)ptep;
- int ret = pteval & _PAGE_DIRTY;
- if ( ret ) queue_l1_entry_update(ptep, pteval & ~_PAGE_DIRTY);
- return ret;
-}
-static inline int ptep_test_and_clear_young(pte_t *ptep)
-{
- unsigned long pteval = *(unsigned long *)ptep;
- int ret = pteval & _PAGE_ACCESSED;
- if ( ret ) queue_l1_entry_update(ptep, pteval & ~_PAGE_ACCESSED);
- return ret;
-}
-static inline void ptep_set_wrprotect(pte_t *ptep)
-{
- unsigned long pteval = *(unsigned long *)ptep;
- if ( (pteval & _PAGE_RW) )
- queue_l1_entry_update(ptep, pteval & ~_PAGE_RW);
-}
-static inline void ptep_mkdirty(pte_t *ptep)
-{
- unsigned long pteval = *(unsigned long *)ptep;
- if ( !(pteval & _PAGE_DIRTY) )
- queue_l1_entry_update(ptep, pteval | _PAGE_DIRTY);
-}
-
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-
-#define mk_pte(page, pgprot) __mk_pte((page) - mem_map, (pgprot))
-
-/* This takes a physical page address that is used by the remapping functions */
-#define mk_pte_phys(physpage, pgprot) __mk_pte((physpage) >> PAGE_SHIFT, pgprot)
-
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
- pte.pte_low &= _PAGE_CHG_MASK;
- pte.pte_low |= pgprot_val(newprot);
- return pte;
-}
-
-#define page_pte(page) page_pte_prot(page, __pgprot(0))
-
-#define pmd_page(pmd) \
-((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
-/* to find an entry in a page-table-directory. */
-#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-
-#define __pgd_offset(address) pgd_index(address)
-
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-#define __pmd_offset(address) \
- (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/* Find an entry in the third-level page table.. */
-#define __pte_offset(address) \
- ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset(dir, address) ((pte_t *) pmd_page(*(dir)) + \
- __pte_offset(address))
-
-/*
- * The i386 doesn't have any external MMU info: the kernel page
- * tables contain all the necessary information.
- */
-#define update_mmu_cache(vma,address,pte) do { } while (0)
-
-/* Encode and de-code a swap entry */
-#define SWP_TYPE(x) (((x).val >> 1) & 0x3f)
-#define SWP_OFFSET(x) ((x).val >> 8)
-#define SWP_ENTRY(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-#define pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
-#define swp_entry_to_pte(x) ((pte_t) { (x).val })
-
-struct page;
-int change_page_attr(struct page *, int, pgprot_t prot);
-
-static inline void __make_page_readonly(void *va)
-{
- pgd_t *pgd = pgd_offset_k((unsigned long)va);
- pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
- pte_t *pte = pte_offset(pmd, (unsigned long)va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
-}
-
-static inline void __make_page_writeable(void *va)
-{
- pgd_t *pgd = pgd_offset_k((unsigned long)va);
- pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
- pte_t *pte = pte_offset(pmd, (unsigned long)va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
-}
-
-static inline void make_page_readonly(void *va)
-{
- pgd_t *pgd = pgd_offset_k((unsigned long)va);
- pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
- pte_t *pte = pte_offset(pmd, (unsigned long)va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
- if ( (unsigned long)va >= VMALLOC_START )
- __make_page_readonly(machine_to_virt(
- *(unsigned long *)pte&PAGE_MASK));
-}
-
-static inline void make_page_writeable(void *va)
-{
- pgd_t *pgd = pgd_offset_k((unsigned long)va);
- pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
- pte_t *pte = pte_offset(pmd, (unsigned long)va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
- if ( (unsigned long)va >= VMALLOC_START )
- __make_page_writeable(machine_to_virt(
- *(unsigned long *)pte&PAGE_MASK));
-}
-
-static inline void make_pages_readonly(void *va, unsigned int nr)
-{
- while ( nr-- != 0 )
- {
- make_page_readonly(va);
- va = (void *)((unsigned long)va + PAGE_SIZE);
- }
-}
-
-static inline void make_pages_writeable(void *va, unsigned int nr)
-{
- while ( nr-- != 0 )
- {
- make_page_writeable(va);
- va = (void *)((unsigned long)va + PAGE_SIZE);
- }
-}
-
-static inline unsigned long arbitrary_virt_to_phys(void *va)
-{
- pgd_t *pgd = pgd_offset_k((unsigned long)va);
- pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
- pte_t *pte = pte_offset(pmd, (unsigned long)va);
- unsigned long pa = (*(unsigned long *)pte) & PAGE_MASK;
- return pa | ((unsigned long)va & (PAGE_SIZE-1));
-}
-
-#endif /* !__ASSEMBLY__ */
-
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-#define PageSkip(page) (0)
-#define kern_addr_valid(addr) (1)
-
-#define io_remap_page_range remap_page_range
-
-#endif /* _I386_PGTABLE_H */
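
The swap-entry macros near the end pack a swapped-out page's identity into a non-present pte: the type goes in bits 1-6, the offset from bit 8 up, and bit 0 (present) stays clear so the MMU ignores the entry. A self-contained round-trip check of that encoding:

    /* Sketch of the swap-entry packing used by SWP_TYPE/SWP_OFFSET/
     * SWP_ENTRY above. */
    #include <assert.h>
    #include <stdio.h>

    typedef struct { unsigned long val; } swp_entry_t;

    #define SWP_TYPE(x)          (((x).val >> 1) & 0x3f)
    #define SWP_OFFSET(x)        ((x).val >> 8)
    #define SWP_ENTRY(type, off) \
        ((swp_entry_t){ ((type) << 1) | ((off) << 8) })

    int main(void)
    {
        swp_entry_t e = SWP_ENTRY(5UL, 1234UL);
        assert(SWP_TYPE(e) == 5 && SWP_OFFSET(e) == 1234);
        assert(!(e.val & 1));               /* not "present" */
        printf("entry=%#lx type=%lu offset=%lu\n",
               e.val, SWP_TYPE(e), SWP_OFFSET(e));
        return 0;
    }
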
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/proc_cmd.h b/xenolinux-2.4.26-sparse/include/asm-xen/proc_cmd.h
deleted file mode 100644
index 08e452de15..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/proc_cmd.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/******************************************************************************
- * proc_cmd.h
- *
- * Interface to /proc/cmd and /proc/xen/privcmd.
- */
-
-#ifndef __PROC_CMD_H__
-#define __PROC_CMD_H__
-
-typedef struct privcmd_hypercall
-{
- unsigned long op;
- unsigned long arg[5];
-} privcmd_hypercall_t;
-
-typedef struct privcmd_mmap_entry {
- unsigned long va;
- unsigned long mfn;
- unsigned long npages;
-} privcmd_mmap_entry_t;
-
-typedef struct privcmd_mmap {
- int num;
- domid_t dom; /* target domain */
- privcmd_mmap_entry_t *entry;
-} privcmd_mmap_t;
-
-typedef struct privcmd_mmapbatch {
- int num;                   /* number of pages to populate */
- domid_t dom;               /* target domain */
- unsigned long addr;        /* virtual address */
- unsigned long *arr;        /* array of mfns - top nibble set on err */
-} privcmd_mmapbatch_t;
-
-typedef struct privcmd_blkmsg
-{
- unsigned long op;
- void *buf;
- int buf_size;
-} privcmd_blkmsg_t;
-
-/*
- * @cmd: IOCTL_PRIVCMD_HYPERCALL
- * @arg: &privcmd_hypercall_t
- * Return: Value returned from execution of the specified hypercall.
- */
-#define IOCTL_PRIVCMD_HYPERCALL \
- _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
-
-/*
- * @cmd: IOCTL_PRIVCMD_INITDOMAIN_EVTCHN
- * @arg: n/a
- * Return: Port associated with domain-controller end of control event channel
- * for the initial domain.
- */
-#define IOCTL_PRIVCMD_INITDOMAIN_EVTCHN \
- _IOC(_IOC_NONE, 'P', 1, 0)
-#define IOCTL_PRIVCMD_MMAP \
- _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
-#define IOCTL_PRIVCMD_MMAPBATCH \
- _IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
-
-#endif /* __PROC_CMD_H__ */
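
A hedged sketch of how a privileged user-space tool might drive IOCTL_PRIVCMD_HYPERCALL; the device path and the hypercall number used here are illustrative only, and contemporary tools went through libxc rather than raw ioctl:

    /* Illustrative only: opening the privcmd node and issuing one
     * hypercall ioctl.  Path and op number are assumptions. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    typedef struct privcmd_hypercall {
        unsigned long op;
        unsigned long arg[5];
    } privcmd_hypercall_t;

    #ifndef IOCTL_PRIVCMD_HYPERCALL
    #define IOCTL_PRIVCMD_HYPERCALL \
        _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
    #endif

    int main(void)
    {
        privcmd_hypercall_t call;
        int fd = open("/proc/xen/privcmd", O_RDWR);  /* assumed path */
        if (fd < 0) {
            perror("open");
            return 1;
        }
        memset(&call, 0, sizeof(call));
        call.op = 0;                /* hypothetical hypercall number */
        if (ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call) < 0)
            perror("ioctl");
        close(fd);
        return 0;
    }
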
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/processor.h b/xenolinux-2.4.26-sparse/include/asm-xen/processor.h
deleted file mode 100644
index 2b290252be..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/processor.h
+++ /dev/null
@@ -1,484 +0,0 @@
-/*
- * include/asm-i386/processor.h
- *
- * Copyright (C) 1994 Linus Torvalds
- */
-
-#ifndef __ASM_I386_PROCESSOR_H
-#define __ASM_I386_PROCESSOR_H
-
-#include <asm/math_emu.h>
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/types.h>
-#include <asm/sigcontext.h>
-#include <asm/cpufeature.h>
-#include <linux/cache.h>
-#include <linux/config.h>
-#include <linux/threads.h>
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
-
-/*
- * CPU type and hardware bug flags. Kept separately for each CPU.
- * Members of this structure are referenced in head.S, so think twice
- * before touching them. [mj]
- */
-
-struct cpuinfo_x86 {
- __u8 x86; /* CPU family */
- __u8 x86_vendor; /* CPU vendor */
- __u8 x86_model;
- __u8 x86_mask;
- char wp_works_ok; /* It doesn't on 386's */
- char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
- char hard_math;
- char rfu;
- int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
- __u32 x86_capability[NCAPINTS];
- char x86_vendor_id[16];
- char x86_model_id[64];
- int x86_cache_size; /* in KB - valid for CPUS which support this
- call */
- int fdiv_bug;
- int f00f_bug;
- int coma_bug;
- unsigned long loops_per_jiffy;
- unsigned long *pgd_quick;
- unsigned long *pmd_quick;
- unsigned long *pte_quick;
- unsigned long pgtable_cache_sz;
-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
-
-#define X86_VENDOR_INTEL 0
-#define X86_VENDOR_CYRIX 1
-#define X86_VENDOR_AMD 2
-#define X86_VENDOR_UMC 3
-#define X86_VENDOR_NEXGEN 4
-#define X86_VENDOR_CENTAUR 5
-#define X86_VENDOR_RISE 6
-#define X86_VENDOR_TRANSMETA 7
-#define X86_VENDOR_NSC 8
-#define X86_VENDOR_SIS 9
-#define X86_VENDOR_UNKNOWN 0xff
-
-/*
- * capabilities of CPUs
- */
-
-extern struct cpuinfo_x86 boot_cpu_data;
-extern struct tss_struct init_tss[NR_CPUS];
-
-#ifdef CONFIG_SMP
-extern struct cpuinfo_x86 cpu_data[];
-#define current_cpu_data cpu_data[smp_processor_id()]
-#else
-#define cpu_data (&boot_cpu_data)
-#define current_cpu_data boot_cpu_data
-#endif
-
-extern char ignore_irq13;
-
-extern void identify_cpu(struct cpuinfo_x86 *);
-extern void print_cpu_info(struct cpuinfo_x86 *);
-
-/*
- * EFLAGS bits
- */
-#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
-#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
-#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
-#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
-#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
-#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
-#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
-#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
-#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
-#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
-#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
-#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
-#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
-#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
-#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
-#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
-
-/*
- * Generic CPUID function
- */
-static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
-{
- __asm__("cpuid"
- : "=a" (*eax),
- "=b" (*ebx),
- "=c" (*ecx),
- "=d" (*edx)
- : "0" (op));
-}
-
-/*
- * CPUID functions returning a single datum
- */
-static inline unsigned int cpuid_eax(unsigned int op)
-{
- unsigned int eax;
-
- __asm__("cpuid"
- : "=a" (eax)
- : "0" (op)
- : "bx", "cx", "dx");
- return eax;
-}
-static inline unsigned int cpuid_ebx(unsigned int op)
-{
- unsigned int eax, ebx;
-
- __asm__("cpuid"
- : "=a" (eax), "=b" (ebx)
- : "0" (op)
- : "cx", "dx" );
- return ebx;
-}
-static inline unsigned int cpuid_ecx(unsigned int op)
-{
- unsigned int eax, ecx;
-
- __asm__("cpuid"
- : "=a" (eax), "=c" (ecx)
- : "0" (op)
- : "bx", "dx" );
- return ecx;
-}
-static inline unsigned int cpuid_edx(unsigned int op)
-{
- unsigned int eax, edx;
-
- __asm__("cpuid"
- : "=a" (eax), "=d" (edx)
- : "0" (op)
- : "bx", "cx");
- return edx;
-}
-
-/*
- * Intel CPU features in CR4
- */
-#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
-#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
-#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
-#define X86_CR4_DE 0x0008 /* enable debugging extensions */
-#define X86_CR4_PSE 0x0010 /* enable page size extensions */
-#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
-#define X86_CR4_MCE 0x0040 /* Machine check enable */
-#define X86_CR4_PGE 0x0080 /* enable global pages */
-#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
-#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
-#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
-
-#define load_cr3(pgdir) \
- asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)));
-
-extern unsigned long mmu_cr4_features;
-
-#include <asm/hypervisor.h>
-
-static inline void set_in_cr4 (unsigned long mask)
-{
- BUG();
-}
-
-static inline void clear_in_cr4 (unsigned long mask)
-{
- BUG();
-}
-
-/*
- * Cyrix CPU configuration register indexes
- */
-#define CX86_CCR0 0xc0
-#define CX86_CCR1 0xc1
-#define CX86_CCR2 0xc2
-#define CX86_CCR3 0xc3
-#define CX86_CCR4 0xe8
-#define CX86_CCR5 0xe9
-#define CX86_CCR6 0xea
-#define CX86_CCR7 0xeb
-#define CX86_DIR0 0xfe
-#define CX86_DIR1 0xff
-#define CX86_ARR_BASE 0xc4
-#define CX86_RCR_BASE 0xdc
-
-/*
- * Cyrix CPU indexed register access macros
- */
-
-#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
-
-#define setCx86(reg, data) do { \
- outb((reg), 0x22); \
- outb((data), 0x23); \
-} while (0)
-
-/*
- * Bus types (default is ISA, but people can check others with these..)
- */
-#ifdef CONFIG_EISA
-extern int EISA_bus;
-#else
-#define EISA_bus (0)
-#endif
-extern int MCA_bus;
-
-/* From the system description table in the BIOS. Mostly for MCA use,
- * but others may find it useful. */
-extern unsigned int machine_id;
-extern unsigned int machine_submodel_id;
-extern unsigned int BIOS_revision;
-extern unsigned int mca_pentium_flag;
-
-/*
- * User space process size: 3GB (default).
- */
-#define TASK_SIZE (PAGE_OFFSET)
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
-
-/*
- * Size of io_bitmap in longwords: 32 is ports 0-0x3ff.
- */
-#define IO_BITMAP_SIZE 32
-#define IO_BITMAP_BYTES (IO_BITMAP_SIZE * 4)
-#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
-#define INVALID_IO_BITMAP_OFFSET 0x8000
-
-struct i387_fsave_struct {
- long cwd;
- long swd;
- long twd;
- long fip;
- long fcs;
- long foo;
- long fos;
- long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
- long status; /* software status information */
-};
-
-struct i387_fxsave_struct {
- unsigned short cwd;
- unsigned short swd;
- unsigned short twd;
- unsigned short fop;
- long fip;
- long fcs;
- long foo;
- long fos;
- long mxcsr;
- long reserved;
- long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
- long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
- long padding[56];
-} __attribute__ ((aligned (16)));
-
-struct i387_soft_struct {
- long cwd;
- long swd;
- long twd;
- long fip;
- long fcs;
- long foo;
- long fos;
- long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
- unsigned char ftop, changed, lookahead, no_update, rm, alimit;
- struct info *info;
- unsigned long entry_eip;
-};
-
-union i387_union {
- struct i387_fsave_struct fsave;
- struct i387_fxsave_struct fxsave;
- struct i387_soft_struct soft;
-};
-
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
-struct tss_struct {
- unsigned short back_link,__blh;
- unsigned long esp0;
- unsigned short ss0,__ss0h;
- unsigned long esp1;
- unsigned short ss1,__ss1h;
- unsigned long esp2;
- unsigned short ss2,__ss2h;
- unsigned long __cr3;
- unsigned long eip;
- unsigned long eflags;
- unsigned long eax,ecx,edx,ebx;
- unsigned long esp;
- unsigned long ebp;
- unsigned long esi;
- unsigned long edi;
- unsigned short es, __esh;
- unsigned short cs, __csh;
- unsigned short ss, __ssh;
- unsigned short ds, __dsh;
- unsigned short fs, __fsh;
- unsigned short gs, __gsh;
- unsigned short ldt, __ldth;
- unsigned short trace, bitmap;
- unsigned long io_bitmap[IO_BITMAP_SIZE+1];
- /*
- * pads the TSS to be cacheline-aligned (size is 0x100)
- */
- unsigned long __cacheline_filler[5];
-};
-
-struct thread_struct {
- unsigned long esp0;
- unsigned long eip;
- unsigned long esp;
- unsigned long fs;
- unsigned long gs;
- unsigned int io_pl;
-/* Hardware debugging registers */
- unsigned long debugreg[8]; /* %%db0-7 debug registers */
-/* fault info */
- unsigned long cr2, trap_no, error_code;
-/* floating point info */
- union i387_union i387;
-/* virtual 86 mode info */
- struct vm86_struct * vm86_info;
- unsigned long screen_bitmap;
- unsigned long v86flags, v86mask, saved_esp0;
-};
-
-#define INIT_THREAD { sizeof(init_stack) + (long) &init_stack, \
- 0, 0, 0, 0, 0, 0, {0}, 0, 0, 0, {{0}}, 0, 0, 0, 0, 0 }
-
-#define INIT_TSS { \
- 0,0, /* back_link, __blh */ \
- sizeof(init_stack) + (long) &init_stack, /* esp0 */ \
- __KERNEL_DS, 0, /* ss0 */ \
- 0,0,0,0,0,0, /* stack1, stack2 */ \
- 0, /* cr3 */ \
- 0,0, /* eip,eflags */ \
- 0,0,0,0, /* eax,ecx,edx,ebx */ \
- 0,0,0,0, /* esp,ebp,esi,edi */ \
- 0,0,0,0,0,0, /* es,cs,ss */ \
- 0,0,0,0,0,0, /* ds,fs,gs */ \
- 0,0, /* ldt */ \
- 0, INVALID_IO_BITMAP_OFFSET, /* trace, bitmap */ \
- {~0, } /* ioperm */ \
-}
-
-#define start_thread(regs, new_eip, new_esp) do { \
- __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \
- set_fs(USER_DS); \
- regs->xds = __USER_DS; \
- regs->xes = __USER_DS; \
- regs->xss = __USER_DS; \
- regs->xcs = __USER_CS; \
- regs->eip = new_eip; \
- regs->esp = new_esp; \
-} while (0)
-
-/* Forward declaration, a strange C thing */
-struct task_struct;
-struct mm_struct;
-
-/* Free all resources held by a thread. */
-extern void release_thread(struct task_struct *);
-/*
- * create a kernel thread without removing it from tasklists
- */
-extern int arch_kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
-/* Copy and release all segment info associated with a VM.
- * Unusable due to lack of error handling; use {init_new,destroy}_context
- * instead.
- */
-static inline void copy_segments(struct task_struct *p, struct mm_struct * mm) { }
-static inline void release_segments(struct mm_struct * mm) { }
-
-/*
- * Return saved PC of a blocked thread.
- */
-static inline unsigned long thread_saved_pc(struct thread_struct *t)
-{
- return ((unsigned long *)t->esp)[3];
-}
-
-unsigned long get_wchan(struct task_struct *p);
-#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1019])
-#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1022])
-
-#define THREAD_SIZE (2*PAGE_SIZE)
-#define alloc_task_struct() ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
-#define free_task_struct(p) free_pages((unsigned long) (p), 1)
-#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
-
-#define init_task (init_task_union.task)
-#define init_stack (init_task_union.stack)
-
-struct microcode {
- unsigned int hdrver;
- unsigned int rev;
- unsigned int date;
- unsigned int sig;
- unsigned int cksum;
- unsigned int ldrver;
- unsigned int pf;
- unsigned int reserved[5];
- unsigned int bits[500];
-};
-
-/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
-#define MICROCODE_IOCFREE _IO('6',0)
-
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
-{
- __asm__ __volatile__("rep;nop" ::: "memory");
-}
-
-#define cpu_relax() rep_nop()
-
-/* Prefetch instructions for Pentium III and AMD Athlon */
-#if defined(CONFIG_MPENTIUMIII) || defined (CONFIG_MPENTIUM4)
-
-#define ARCH_HAS_PREFETCH
-extern inline void prefetch(const void *x)
-{
- __asm__ __volatile__ ("prefetchnta (%0)" : : "r"(x));
-}
-
-#elif CONFIG_X86_USE_3DNOW
-
-#define ARCH_HAS_PREFETCH
-#define ARCH_HAS_PREFETCHW
-#define ARCH_HAS_SPINLOCK_PREFETCH
-
-extern inline void prefetch(const void *x)
-{
- __asm__ __volatile__ ("prefetch (%0)" : : "r"(x));
-}
-
-extern inline void prefetchw(const void *x)
-{
- __asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
-}
-#define spin_lock_prefetch(x) prefetchw(x)
-
-#endif
-
-#define TF_MASK 0x100
-
-#endif /* __ASM_I386_PROCESSOR_H */
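
A runnable use of the cpuid() pattern above: leaf 0 returns the maximum supported leaf in eax and the vendor string spread across ebx/edx/ecx (assumes gcc-style inline asm on an x86 CPU):

    /* Reading the CPU vendor string with the cpuid() helper shape
     * used by the header. */
    #include <stdio.h>
    #include <string.h>

    static void cpuid(unsigned int op, unsigned int *eax,
                      unsigned int *ebx, unsigned int *ecx,
                      unsigned int *edx)
    {
        __asm__("cpuid"
                : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                : "0" (op));
    }

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;
        char vendor[13];
        cpuid(0, &eax, &ebx, &ecx, &edx);
        memcpy(vendor + 0, &ebx, 4);        /* note the ebx/edx/ecx order */
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);
        vendor[12] = '\0';
        printf("max leaf %u, vendor \"%s\"\n", eax, vendor);
        return 0;
    }
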
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/ptrace.h b/xenolinux-2.4.26-sparse/include/asm-xen/ptrace.h
deleted file mode 100644
index 4457ac0b17..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/ptrace.h
+++ /dev/null
@@ -1,63 +0,0 @@
-#ifndef _I386_PTRACE_H
-#define _I386_PTRACE_H
-
-#define EBX 0
-#define ECX 1
-#define EDX 2
-#define ESI 3
-#define EDI 4
-#define EBP 5
-#define EAX 6
-#define DS 7
-#define ES 8
-#define FS 9
-#define GS 10
-#define ORIG_EAX 11
-#define EIP 12
-#define CS 13
-#define EFL 14
-#define UESP 15
-#define SS 16
-#define FRAME_SIZE 17
-
-/* This struct defines the way the registers are stored on the
- stack during a system call. */
-
-struct pt_regs {
- long ebx;
- long ecx;
- long edx;
- long esi;
- long edi;
- long ebp;
- long eax;
- int xds;
- int xes;
- long orig_eax;
- long eip;
- int xcs;
- long eflags;
- long esp;
- int xss;
-};
-
-/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
-#define PTRACE_GETREGS 12
-#define PTRACE_SETREGS 13
-#define PTRACE_GETFPREGS 14
-#define PTRACE_SETFPREGS 15
-#define PTRACE_GETFPXREGS 18
-#define PTRACE_SETFPXREGS 19
-
-#define PTRACE_SETOPTIONS 21
-
-/* options set using PTRACE_SETOPTIONS */
-#define PTRACE_O_TRACESYSGOOD 0x00000001
-
-#ifdef __KERNEL__
-#define user_mode(regs) ((regs) && (2 & (regs)->xcs))
-#define instruction_pointer(regs) ((regs) ? (regs)->eip : NULL)
-extern void show_regs(struct pt_regs *);
-#endif
-
-#endif
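The PTRACE_GETREGS/PTRACE_SETREGS requests above operate on the register layout defined by struct pt_regs; glibc exposes the same layout to debuggers as struct user_regs_struct. A hedged user-space sketch of reading a traced child's registers (error handling trimmed for brevity):

    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/user.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();
        if (pid == 0) {
            ptrace(PTRACE_TRACEME, 0, NULL, NULL);   /* stop at exec */
            execlp("true", "true", (char *)NULL);
            _exit(1);
        }
        waitpid(pid, NULL, 0);                       /* child is stopped */

        struct user_regs_struct regs;
        ptrace(PTRACE_GETREGS, pid, NULL, &regs);
    #ifdef __i386__
        printf("child eip = %#lx\n", (unsigned long)regs.eip);
    #else
        printf("child rip = %#llx\n", (unsigned long long)regs.rip);
    #endif
        ptrace(PTRACE_CONT, pid, NULL, NULL);
        return 0;
    }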
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/segment.h b/xenolinux-2.4.26-sparse/include/asm-xen/segment.h
deleted file mode 100644
index ca13028ce0..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/segment.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _ASM_SEGMENT_H
-#define _ASM_SEGMENT_H
-
-#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#endif
-#include <asm/hypervisor-ifs/hypervisor-if.h>
-
-#define __KERNEL_CS FLAT_RING1_CS
-#define __KERNEL_DS FLAT_RING1_DS
-
-#define __USER_CS FLAT_RING3_CS
-#define __USER_DS FLAT_RING3_DS
-
-#endif
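These selectors reflect the xenolinux privilege split: Xen keeps ring 0 for itself, so the guest kernel runs in ring 1 (FLAT_RING1_*) while userland stays in ring 3 (FLAT_RING3_*). The requested privilege level is carried in the low two bits of any selector; a one-line sketch of extracting it:

    static inline unsigned int selector_rpl(unsigned int sel)
    {
        return sel & 3;   /* 0 = Xen (ring 0), 1 = guest kernel, 3 = user */
    }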
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/smp.h b/xenolinux-2.4.26-sparse/include/asm-xen/smp.h
deleted file mode 100644
index 804b93c332..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/smp.h
+++ /dev/null
@@ -1,102 +0,0 @@
-#ifndef __ASM_SMP_H
-#define __ASM_SMP_H
-
-/*
- * We need the APIC definitions automatically as part of 'smp.h'
- */
-#ifndef __ASSEMBLY__
-#include <linux/config.h>
-#include <linux/threads.h>
-#include <linux/ptrace.h>
-#endif
-
-#ifdef CONFIG_X86_LOCAL_APIC
-#ifndef __ASSEMBLY__
-#include <asm/bitops.h>
-#include <asm/mpspec.h>
-#ifdef CONFIG_X86_IO_APIC
-#include <asm/io_apic.h>
-#endif
-#include <asm/apic.h>
-#endif
-#endif
-
-#ifdef CONFIG_SMP
-#ifndef __ASSEMBLY__
-
-/*
- * Private routines/data
- */
-
-extern void smp_alloc_memory(void);
-extern unsigned long phys_cpu_present_map;
-extern unsigned long cpu_online_map;
-extern volatile unsigned long smp_invalidate_needed;
-extern int pic_mode;
-extern int smp_num_siblings;
-extern int cpu_sibling_map[];
-
-extern void smp_flush_tlb(void);
-extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
-extern void smp_send_reschedule(int cpu);
-extern void smp_invalidate_rcv(void); /* Process an NMI */
-extern void (*mtrr_hook) (void);
-extern void zap_low_mappings (void);
-
-/*
- * On x86 all CPUs are mapped 1:1 to the APIC space.
- * This simplifies scheduling and IPI sending and
- * compresses data structures.
- */
-static inline int cpu_logical_map(int cpu)
-{
- return cpu;
-}
-static inline int cpu_number_map(int cpu)
-{
- return cpu;
-}
-
-/*
- * Some low-level functions might want to know about
- * the real APIC ID <-> CPU # mapping.
- */
-#define MAX_APICID 256
-extern volatile int cpu_to_physical_apicid[NR_CPUS];
-extern volatile int physical_apicid_to_cpu[MAX_APICID];
-extern volatile int cpu_to_logical_apicid[NR_CPUS];
-extern volatile int logical_apicid_to_cpu[MAX_APICID];
-
-/*
- * General functions that each host system must provide.
- */
-
-extern void smp_boot_cpus(void);
-extern void smp_store_cpu_info(int id); /* Store per-CPU info (like the initial udelay numbers) */
-
-/*
- * This function is needed by all SMP systems. It must _always_ be valid
- * from the initial startup. We map APIC_BASE very early in page_setup(),
- * so this is correct in the x86 case.
- */
-
-#define smp_processor_id() (current->processor)
-
-#endif /* !__ASSEMBLY__ */
-
-#define NO_PROC_ID 0xFF /* No processor magic marker */
-
-/*
- * This magic constant controls our willingness to transfer
- * a process across CPUs. Such a transfer incurs misses in the L1
- * cache, and L2 hits as well on a P6 or P5 with multiple L2 caches. My
- * gut feeling is that this value will vary by board. For a board
- * with a separate L2 cache it probably also depends on the RSS, and
- * for a board with a shared L2 cache it ought to decay quickly as other
- * processes are run.
- */
-
-#define PROC_CHANGE_PENALTY 15 /* Schedule penalty */
-
-#endif
-#endif
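cpu_online_map above is a plain bitmask indexed by CPU number, which is why cpu_logical_map() and cpu_number_map() can be identity functions. An illustrative walk over such a mask (the 'map' parameter stands in for the real variable):

    #include <stdio.h>

    static void print_online_cpus(unsigned long map)
    {
        for (unsigned int cpu = 0; cpu < 8 * sizeof(map); cpu++)
            if (map & (1UL << cpu))
                printf("cpu %u online\n", cpu);
    }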
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/suspend.h b/xenolinux-2.4.26-sparse/include/asm-xen/suspend.h
deleted file mode 100644
index 0a9c8e74a9..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/suspend.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/******************************************************************************
- * suspend.h
- *
- * NB. This file is part of the Xenolinux interface with Xenoserver control
- * software. It can be included in such software without invoking the GPL.
- *
- * Copyright (c) 2003, K A Fraser
- */
-
-#ifndef __ASM_XEN_SUSPEND_H__
-#define __ASM_XEN_SUSPEND_H__
-
-typedef struct suspend_record_st {
- /* To be filled in before resume. */
- extended_start_info_t resume_info;
-    /*
-     * The machine frame number of a frame that lists, in sequence, the
-     * machine frame numbers of the frames that hold the PFN -> MFN
-     * translation table data.
-     */
- unsigned long pfn_to_mfn_frame_list;
- /* Number of entries in the PFN -> MFN translation table. */
- unsigned long nr_pfns;
-} suspend_record_t;
-
-#endif /* __ASM_XEN_SUSPEND_H__ */
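A sketch of the arithmetic the pfn_to_mfn_frame_list comment implies, assuming 4 KiB pages and 4-byte entries so that one frame holds 1024 MFNs (the names below are illustrative, not part of the interface):

    #define ENTRIES_PER_FRAME (4096 / sizeof(unsigned long))   /* 1024 on i386 */

    static unsigned long frame_list_len(unsigned long nr_pfns)
    {
        /* number of frame-list entries needed to cover nr_pfns PFNs */
        return (nr_pfns + ENTRIES_PER_FRAME - 1) / ENTRIES_PER_FRAME;
    }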
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/synch_bitops.h b/xenolinux-2.4.26-sparse/include/asm-xen/synch_bitops.h
deleted file mode 100644
index 8093de0ac9..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/synch_bitops.h
+++ /dev/null
@@ -1,83 +0,0 @@
-#ifndef __XEN_SYNCH_BITOPS_H__
-#define __XEN_SYNCH_BITOPS_H__
-
-/*
- * Copyright 1992, Linus Torvalds.
- * Heavily modified to provide guaranteed strong synchronisation
- * when communicating with Xen or other guest OSes running on other CPUs.
- */
-
-#include <linux/config.h>
-
-#define ADDR (*(volatile long *) addr)
-
-static __inline__ void synch_set_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__ (
- "lock btsl %1,%0"
- : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_clear_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__ (
- "lock btrl %1,%0"
- : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_change_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__ (
- "lock btcl %1,%0"
- : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
-{
- int oldbit;
- __asm__ __volatile__ (
- "lock btsl %2,%1\n\tsbbl %0,%0"
- : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
- return oldbit;
-}
-
-static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
-{
- int oldbit;
- __asm__ __volatile__ (
- "lock btrl %2,%1\n\tsbbl %0,%0"
- : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
- return oldbit;
-}
-
-static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
-{
- int oldbit;
-
- __asm__ __volatile__ (
- "lock btcl %2,%1\n\tsbbl %0,%0"
- : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
- return oldbit;
-}
-
-static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
-{
- return ((1UL << (nr & 31)) &
- (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
-{
- int oldbit;
- __asm__ __volatile__ (
- "btl %2,%1\n\tsbbl %0,%0"
- : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
- return oldbit;
-}
-
-#define synch_test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- synch_const_test_bit((nr),(addr)) : \
- synch_var_test_bit((nr),(addr)))
-
-#endif /* __XEN_SYNCH_BITOPS_H__ */
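Unlike the regular bitops, these helpers keep the lock prefix even on UP kernels, because the peer (Xen, or a guest on another CPU) is always genuinely concurrent. A user-space analogue of synch_test_and_set_bit() using GCC's __sync builtins:

    #include <stdio.h>

    int main(void)
    {
        unsigned long word = 0;
        /* atomically set bit 3 and fetch the previous word, as the
           locked btsl in synch_test_and_set_bit() does */
        unsigned long old = __sync_fetch_and_or(&word, 1UL << 3);
        printf("bit 3 was previously %s\n",
               (old & (1UL << 3)) ? "set" : "clear");
        return 0;
    }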
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/system.h b/xenolinux-2.4.26-sparse/include/asm-xen/system.h
deleted file mode 100644
index 6a8e352ac5..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/system.h
+++ /dev/null
@@ -1,408 +0,0 @@
-#ifndef __ASM_SYSTEM_H
-#define __ASM_SYSTEM_H
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <asm/synch_bitops.h>
-#include <asm/segment.h>
-#include <asm/hypervisor.h>
-#include <asm/evtchn.h>
-
-#ifdef __KERNEL__
-
-struct task_struct;
-extern void FASTCALL(__switch_to(struct task_struct *prev,
- struct task_struct *next));
-
-#define prepare_to_switch() \
-do { \
- struct thread_struct *__t = &current->thread; \
- __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (*(int *)&__t->fs) ); \
- __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (*(int *)&__t->gs) ); \
-} while (0)
-#define switch_to(prev,next,last) do { \
- asm volatile("pushl %%esi\n\t" \
- "pushl %%edi\n\t" \
- "pushl %%ebp\n\t" \
- "movl %%esp,%0\n\t" /* save ESP */ \
- "movl %3,%%esp\n\t" /* restore ESP */ \
- "movl $1f,%1\n\t" /* save EIP */ \
- "pushl %4\n\t" /* restore EIP */ \
- "jmp __switch_to\n" \
- "1:\t" \
- "popl %%ebp\n\t" \
- "popl %%edi\n\t" \
- "popl %%esi\n\t" \
- :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
- "=b" (last) \
- :"m" (next->thread.esp),"m" (next->thread.eip), \
- "a" (prev), "d" (next), \
- "b" (prev)); \
-} while (0)
-
-#define _set_base(addr,base) do { unsigned long __pr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
- "rorl $16,%%edx\n\t" \
- "movb %%dl,%2\n\t" \
- "movb %%dh,%3" \
- :"=&d" (__pr) \
- :"m" (*((addr)+2)), \
- "m" (*((addr)+4)), \
- "m" (*((addr)+7)), \
- "0" (base) \
- ); } while(0)
-
-#define _set_limit(addr,limit) do { unsigned long __lr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
- "rorl $16,%%edx\n\t" \
- "movb %2,%%dh\n\t" \
- "andb $0xf0,%%dh\n\t" \
- "orb %%dh,%%dl\n\t" \
- "movb %%dl,%2" \
- :"=&d" (__lr) \
- :"m" (*(addr)), \
- "m" (*((addr)+6)), \
- "0" (limit) \
- ); } while(0)
-
-#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
-#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
-
-static inline unsigned long _get_base(char * addr)
-{
- unsigned long __base;
- __asm__("movb %3,%%dh\n\t"
- "movb %2,%%dl\n\t"
- "shll $16,%%edx\n\t"
- "movw %1,%%dx"
- :"=&d" (__base)
- :"m" (*((addr)+2)),
- "m" (*((addr)+4)),
- "m" (*((addr)+7)));
- return __base;
-}
-
-#define get_base(ldt) _get_base( ((char *)&(ldt)) )
-
-/*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong.
- */
-#define loadsegment(seg,value) \
- asm volatile("\n" \
- "1:\t" \
- "movl %0,%%" #seg "\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3:\t" \
- "pushl $0\n\t" \
- "popl %%" #seg "\n\t" \
- "jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n\t" \
- ".align 4\n\t" \
- ".long 1b,3b\n" \
- ".previous" \
- : :"m" (*(unsigned int *)&(value)))
-
-/* NB. 'clts' is done for us by Xen during virtual trap. */
-#define clts() ((void)0)
-#define stts() (HYPERVISOR_fpu_taskswitch())
-
-#endif /* __KERNEL__ */
-
-static inline unsigned long get_limit(unsigned long segment)
-{
- unsigned long __limit;
- __asm__("lsll %1,%0"
- :"=r" (__limit):"r" (segment));
- return __limit+1;
-}
-
-#define nop() __asm__ __volatile__ ("nop")
-
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-
-#define tas(ptr) (xchg((ptr),1))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-
-/*
- * The semantics of CMPXCHG8B are a bit strange; this is why
- * there is a loop and the loading of %%eax and %%edx has to
- * be inside. This inlines well in most cases; the cached
- * cost is around ~38 cycles. (In the future we might want
- * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
- * might have an implicit FPU-save as a cost, so it's not
- * clear which path to go.)
- *
- * cmpxchg8b must be used with the lock prefix here to allow
- * the instruction to be executed atomically; see page 3-102
- * of the instruction set reference 24319102.pdf. We need
- * the reader side to see a coherent 64-bit value.
- */
-static inline void __set_64bit (unsigned long long * ptr,
- unsigned int low, unsigned int high)
-{
- __asm__ __volatile__ (
- "\n1:\t"
- "movl (%0), %%eax\n\t"
- "movl 4(%0), %%edx\n\t"
- "lock cmpxchg8b (%0)\n\t"
- "jnz 1b"
- : /* no outputs */
- : "D"(ptr),
- "b"(low),
- "c"(high)
- : "ax","dx","memory");
-}
-
-static inline void __set_64bit_constant (unsigned long long *ptr,
- unsigned long long value)
-{
- __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
-}
-#define ll_low(x) *(((unsigned int*)&(x))+0)
-#define ll_high(x) *(((unsigned int*)&(x))+1)
-
-static inline void __set_64bit_var (unsigned long long *ptr,
- unsigned long long value)
-{
- __set_64bit(ptr,ll_low(value), ll_high(value));
-}
-
-#define set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit_constant(ptr, value) : \
- __set_64bit_var(ptr, value) )
-
-#define _set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
- __set_64bit(ptr, ll_low(value), ll_high(value)) )
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
- * Note 2: xchg has a side effect, so the volatile attribute is necessary;
- * without it the primitive is generally invalid, since *ptr is an output
- * argument. --ANK
- */
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__ __volatile__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__ __volatile__("xchgl %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- }
- return x;
-}
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#ifdef CONFIG_X86_CMPXCHG
-#define __HAVE_ARCH_CMPXCHG 1
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long prev;
- switch (size) {
- case 1:
- __asm__ __volatile__("lock cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
- __asm__ __volatile__("lock cmpxchgw %w1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
- __asm__ __volatile__("lock cmpxchgl %1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- }
- return old;
-}
-
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
-
-#else
-/* Compiling for a 386 proper. Is it worth implementing via cli/sti? */
-#endif
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- *
- * For now, "wmb()" doesn't actually do anything, as all
- * Intel CPUs follow what Intel calls a *Processor Order*,
- * in which all writes are seen in program order even
- * outside the CPU.
- *
- * I expect future Intel CPUs to have a weaker ordering,
- * but I'd also expect them to finally get their act together
- * and add some real memory barriers if so.
- *
- * Some non-Intel clones support out-of-order stores; wmb()
- * ceases to be a nop for these.
- */
-
-#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
-#define rmb() mb()
-
-#ifdef CONFIG_X86_OOSTORE
-#define wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
-#else
-#define wmb() __asm__ __volatile__ ("": : :"memory")
-#endif
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
-
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
-#define safe_halt() ((void)0)
-
-/*
- * The use of 'barrier' in the following reflects their use as local-lock
- * operations. Reentrancy must be prevented (e.g., __cli()) /before/ the
- * following critical operations are executed. All critical operations must
- * complete /before/ reentrancy is permitted (e.g., __sti()). The Alpha
- * architecture also includes these barriers, for example.
- */
-
-#define __cli() \
-do { \
- HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
- barrier(); \
-} while (0)
-
-#define __sti() \
-do { \
- shared_info_t *_shared = HYPERVISOR_shared_info; \
- barrier(); \
- _shared->vcpu_data[0].evtchn_upcall_mask = 0; \
- barrier(); /* unmask then check (avoid races) */ \
- if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
- evtchn_do_upcall(NULL); \
-} while (0)
-
-#define __save_flags(x) \
-do { \
- (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
-} while (0)
-
-#define __restore_flags(x) \
-do { \
- shared_info_t *_shared = HYPERVISOR_shared_info; \
- barrier(); \
-    if ( (_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0 ) {        \
- barrier(); /* unmask then check (avoid races) */ \
- if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
- evtchn_do_upcall(NULL); \
- } \
-} while (0)
-
-#define __save_and_cli(x) \
-do { \
- (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
- HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
- barrier(); \
-} while (0)
-
-#define __save_and_sti(x) \
-do { \
- shared_info_t *_shared = HYPERVISOR_shared_info; \
- barrier(); \
- (x) = _shared->vcpu_data[0].evtchn_upcall_mask; \
- _shared->vcpu_data[0].evtchn_upcall_mask = 0; \
- barrier(); /* unmask then check (avoid races) */ \
- if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
- evtchn_do_upcall(NULL); \
-} while (0)
-
-#define local_irq_save(x) __save_and_cli(x)
-#define local_irq_set(x) __save_and_sti(x)
-#define local_irq_restore(x) __restore_flags(x)
-#define local_irq_disable() __cli()
-#define local_irq_enable() __sti()
-
-
-#ifdef CONFIG_SMP
-#error no SMP
-extern void __global_cli(void);
-extern void __global_sti(void);
-extern unsigned long __global_save_flags(void);
-extern void __global_restore_flags(unsigned long);
-#define cli() __global_cli()
-#define sti() __global_sti()
-#define save_flags(x) ((x)=__global_save_flags())
-#define restore_flags(x) __global_restore_flags(x)
-#define save_and_cli(x)	do { save_flags(x); cli(); } while(0)
-#define save_and_sti(x)	do { save_flags(x); sti(); } while(0)
-
-#else
-
-#define cli() __cli()
-#define sti() __sti()
-#define save_flags(x) __save_flags(x)
-#define restore_flags(x) __restore_flags(x)
-#define save_and_cli(x) __save_and_cli(x)
-#define save_and_sti(x) __save_and_sti(x)
-
-#endif
-
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
-
-extern unsigned long dmi_broken;
-extern int is_sony_vaio_laptop;
-
-#define BROKEN_ACPI_Sx 0x0001
-#define BROKEN_INIT_AFTER_S1 0x0002
-#define BROKEN_PNP_BIOS 0x0004
-
-#endif
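The interrupt-flag macros above replace cli/sti with writes to the shared-info event-channel mask, and __sti() deliberately unmasks before checking for pending events so that nothing arriving while masked is dropped. A user-space sketch of that unmask-then-check pattern, assuming C11 atomics (all names illustrative):

    #include <stdatomic.h>

    struct vcpu_demo {
        atomic_char upcall_mask;      /* 1 = events masked */
        atomic_char upcall_pending;   /* set by the "hypervisor" side */
    };

    static void demo_sti(struct vcpu_demo *v, void (*do_upcall)(void))
    {
        atomic_store(&v->upcall_mask, 0);       /* unmask first...          */
        if (atomic_load(&v->upcall_pending))    /* ...then check, so an     */
            do_upcall();                        /* event that arrived while */
    }                                           /* masked is not lost       */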
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/vga.h b/xenolinux-2.4.26-sparse/include/asm-xen/vga.h
deleted file mode 100644
index d0624cf480..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/vga.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Access to VGA videoram
- *
- * (c) 1998 Martin Mares <mj@ucw.cz>
- */
-
-#ifndef _LINUX_ASM_VGA_H_
-#define _LINUX_ASM_VGA_H_
-
-#include <asm/io.h>
-
-extern unsigned char *vgacon_mmap;
-
-static unsigned long VGA_MAP_MEM(unsigned long x)
-{
- if( vgacon_mmap == NULL )
- {
-        /* This is our first time in this function. This whole thing
-           is a rather grim hack. We know we're going to get asked
-           to map a 32KB region between 0xb0000 and 0xb8000 because
-           that's what VGAs are. We use the boot-time permanent
-           fixed-map region and map it to machine pages.
-        */
- if( x != 0xb8000 )
- panic("Argghh! VGA Console is weird. 1:%08lx\n",x);
-
- vgacon_mmap = (unsigned char*) bt_ioremap( 0xa0000, 128*1024 );
- return (unsigned long) (vgacon_mmap+x-0xa0000);
- }
- else
- {
- if( x != 0xc0000 && x != 0xa0000 ) /* vidmem_end or charmap fonts */
- panic("Argghh! VGA Console is weird. 2:%08lx\n",x);
- return (unsigned long) (vgacon_mmap+x-0xa0000);
- }
- return 0;
-}
-
-static inline unsigned char vga_readb(unsigned char * x) { return (*(x)); }
-static inline void vga_writeb(unsigned char x, unsigned char *y) { *(y) = (x); }
-
-#endif
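Stated directly, the translation VGA_MAP_MEM() performs once the window is mapped: any legacy VGA address in [0xa0000, 0xc0000) becomes an offset into the single 128 KiB bt_ioremap'd region. A sketch ('base' stands in for vgacon_mmap):

    static inline unsigned char *vga_addr(unsigned char *base, unsigned long x)
    {
        return base + (x - 0xa0000);   /* offset into the 128 KiB window */
    }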
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/xen_proc.h b/xenolinux-2.4.26-sparse/include/asm-xen/xen_proc.h
deleted file mode 100644
index d62791e95c..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/xen_proc.h
+++ /dev/null
@@ -1,13 +0,0 @@
-
-#ifndef __ASM_XEN_PROC_H__
-#define __ASM_XEN_PROC_H__
-
-#include <linux/config.h>
-#include <linux/proc_fs.h>
-
-extern struct proc_dir_entry *create_xen_proc_entry(
- const char *name, mode_t mode);
-extern void remove_xen_proc_entry(
- const char *name);
-
-#endif /* __ASM_XEN_PROC_H__ */
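A hedged usage sketch of this interface in 2.4 style; the entry name and read handler below are illustrative, not part of the header:

    static int demo_read(char *page, char **start, off_t off,
                         int count, int *eof, void *data)
    {
        *eof = 1;
        return sprintf(page, "hello from the demo entry\n");
    }

    static void __init demo_init(void)
    {
        struct proc_dir_entry *ent = create_xen_proc_entry("demo", 0444);
        if (ent != NULL)
            ent->read_proc = demo_read;   /* 2.4-era read_proc hook */
    }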
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/xor.h b/xenolinux-2.4.26-sparse/include/asm-xen/xor.h
deleted file mode 100644
index 9e6cca8a8a..0000000000
--- a/xenolinux-2.4.26-sparse/include/asm-xen/xor.h
+++ /dev/null
@@ -1,879 +0,0 @@
-/*
- * include/asm-i386/xor.h
- *
- * Optimized RAID-5 checksumming functions for MMX and SSE.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * You should have received a copy of the GNU General Public License
- * (for example /usr/src/linux/COPYING); if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/*
- * High-speed RAID5 checksumming functions utilizing MMX instructions.
- * Copyright (C) 1998 Ingo Molnar.
- */
-
-#define FPU_SAVE \
- do { \
- if (!(current->flags & PF_USEDFPU)) \
- clts(); \
- __asm__ __volatile__ ("fsave %0; fwait": "=m"(fpu_save[0])); \
- } while (0)
-
-#define FPU_RESTORE \
- do { \
- __asm__ __volatile__ ("frstor %0": : "m"(fpu_save[0])); \
- if (!(current->flags & PF_USEDFPU)) \
- stts(); \
- } while (0)
-
-#define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n"
-#define ST(x,y) " movq %%mm"#y", 8*("#x")(%1) ;\n"
-#define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n"
-#define XO2(x,y) " pxor 8*("#x")(%3), %%mm"#y" ;\n"
-#define XO3(x,y) " pxor 8*("#x")(%4), %%mm"#y" ;\n"
-#define XO4(x,y) " pxor 8*("#x")(%5), %%mm"#y" ;\n"
-
-
-static void
-xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
- unsigned long lines = bytes >> 7;
- char fpu_save[108];
-
- FPU_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- ST(i,0) \
- XO1(i+1,1) \
- ST(i+1,1) \
- XO1(i+2,2) \
- ST(i+2,2) \
- XO1(i+3,3) \
- ST(i+3,3)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $128, %1 ;\n"
- " addl $128, %2 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2)
- :
- : "memory");
-
- FPU_RESTORE;
-}
-
-static void
-xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
-{
- unsigned long lines = bytes >> 7;
- char fpu_save[108];
-
- FPU_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- ST(i,0) \
- XO2(i+1,1) \
- ST(i+1,1) \
- XO2(i+2,2) \
- ST(i+2,2) \
- XO2(i+3,3) \
- ST(i+3,3)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $128, %1 ;\n"
- " addl $128, %2 ;\n"
- " addl $128, %3 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- :
- : "memory");
-
- FPU_RESTORE;
-}
-
-static void
-xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
-{
- unsigned long lines = bytes >> 7;
- char fpu_save[108];
-
- FPU_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- XO3(i,0) \
- ST(i,0) \
- XO3(i+1,1) \
- ST(i+1,1) \
- XO3(i+2,2) \
- ST(i+2,2) \
- XO3(i+3,3) \
- ST(i+3,3)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $128, %1 ;\n"
- " addl $128, %2 ;\n"
- " addl $128, %3 ;\n"
- " addl $128, %4 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
- :
- : "memory");
-
- FPU_RESTORE;
-}
-
-
-static void
-xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
- unsigned long lines = bytes >> 7;
- char fpu_save[108];
-
- FPU_SAVE;
-
-    /* need to save/restore p4/p5 manually otherwise gcc's 10-argument
-       limit gets exceeded (a "+" output constraint counts as two arguments) */
- __asm__ __volatile__ (
- " pushl %4\n"
- " pushl %5\n"
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
- XO4(i,0) \
- ST(i,0) \
- XO4(i+1,1) \
- ST(i+1,1) \
- XO4(i+2,2) \
- ST(i+2,2) \
- XO4(i+3,3) \
- ST(i+3,3)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $128, %1 ;\n"
- " addl $128, %2 ;\n"
- " addl $128, %3 ;\n"
- " addl $128, %4 ;\n"
- " addl $128, %5 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- " popl %5\n"
- " popl %4\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- : "r" (p4), "r" (p5)
- : "memory");
-
- FPU_RESTORE;
-}
-
-#undef LD
-#undef XO1
-#undef XO2
-#undef XO3
-#undef XO4
-#undef ST
-#undef BLOCK
-
-static void
-xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
- unsigned long lines = bytes >> 6;
- char fpu_save[108];
-
- FPU_SAVE;
-
- __asm__ __volatile__ (
- " .align 32 ;\n"
- " 1: ;\n"
- " movq (%1), %%mm0 ;\n"
- " movq 8(%1), %%mm1 ;\n"
- " pxor (%2), %%mm0 ;\n"
- " movq 16(%1), %%mm2 ;\n"
- " movq %%mm0, (%1) ;\n"
- " pxor 8(%2), %%mm1 ;\n"
- " movq 24(%1), %%mm3 ;\n"
- " movq %%mm1, 8(%1) ;\n"
- " pxor 16(%2), %%mm2 ;\n"
- " movq 32(%1), %%mm4 ;\n"
- " movq %%mm2, 16(%1) ;\n"
- " pxor 24(%2), %%mm3 ;\n"
- " movq 40(%1), %%mm5 ;\n"
- " movq %%mm3, 24(%1) ;\n"
- " pxor 32(%2), %%mm4 ;\n"
- " movq 48(%1), %%mm6 ;\n"
- " movq %%mm4, 32(%1) ;\n"
- " pxor 40(%2), %%mm5 ;\n"
- " movq 56(%1), %%mm7 ;\n"
- " movq %%mm5, 40(%1) ;\n"
- " pxor 48(%2), %%mm6 ;\n"
- " pxor 56(%2), %%mm7 ;\n"
- " movq %%mm6, 48(%1) ;\n"
- " movq %%mm7, 56(%1) ;\n"
-
- " addl $64, %1 ;\n"
- " addl $64, %2 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2)
- :
- : "memory");
-
- FPU_RESTORE;
-}
-
-static void
-xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
-{
- unsigned long lines = bytes >> 6;
- char fpu_save[108];
-
- FPU_SAVE;
-
- __asm__ __volatile__ (
- " .align 32,0x90 ;\n"
- " 1: ;\n"
- " movq (%1), %%mm0 ;\n"
- " movq 8(%1), %%mm1 ;\n"
- " pxor (%2), %%mm0 ;\n"
- " movq 16(%1), %%mm2 ;\n"
- " pxor 8(%2), %%mm1 ;\n"
- " pxor (%3), %%mm0 ;\n"
- " pxor 16(%2), %%mm2 ;\n"
- " movq %%mm0, (%1) ;\n"
- " pxor 8(%3), %%mm1 ;\n"
- " pxor 16(%3), %%mm2 ;\n"
- " movq 24(%1), %%mm3 ;\n"
- " movq %%mm1, 8(%1) ;\n"
- " movq 32(%1), %%mm4 ;\n"
- " movq 40(%1), %%mm5 ;\n"
- " pxor 24(%2), %%mm3 ;\n"
- " movq %%mm2, 16(%1) ;\n"
- " pxor 32(%2), %%mm4 ;\n"
- " pxor 24(%3), %%mm3 ;\n"
- " pxor 40(%2), %%mm5 ;\n"
- " movq %%mm3, 24(%1) ;\n"
- " pxor 32(%3), %%mm4 ;\n"
- " pxor 40(%3), %%mm5 ;\n"
- " movq 48(%1), %%mm6 ;\n"
- " movq %%mm4, 32(%1) ;\n"
- " movq 56(%1), %%mm7 ;\n"
- " pxor 48(%2), %%mm6 ;\n"
- " movq %%mm5, 40(%1) ;\n"
- " pxor 56(%2), %%mm7 ;\n"
- " pxor 48(%3), %%mm6 ;\n"
- " pxor 56(%3), %%mm7 ;\n"
- " movq %%mm6, 48(%1) ;\n"
- " movq %%mm7, 56(%1) ;\n"
-
- " addl $64, %1 ;\n"
- " addl $64, %2 ;\n"
- " addl $64, %3 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- :
- : "memory" );
-
- FPU_RESTORE;
-}
-
-static void
-xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
-{
- unsigned long lines = bytes >> 6;
- char fpu_save[108];
-
- FPU_SAVE;
-
- __asm__ __volatile__ (
- " .align 32,0x90 ;\n"
- " 1: ;\n"
- " movq (%1), %%mm0 ;\n"
- " movq 8(%1), %%mm1 ;\n"
- " pxor (%2), %%mm0 ;\n"
- " movq 16(%1), %%mm2 ;\n"
- " pxor 8(%2), %%mm1 ;\n"
- " pxor (%3), %%mm0 ;\n"
- " pxor 16(%2), %%mm2 ;\n"
- " pxor 8(%3), %%mm1 ;\n"
- " pxor (%4), %%mm0 ;\n"
- " movq 24(%1), %%mm3 ;\n"
- " pxor 16(%3), %%mm2 ;\n"
- " pxor 8(%4), %%mm1 ;\n"
- " movq %%mm0, (%1) ;\n"
- " movq 32(%1), %%mm4 ;\n"
- " pxor 24(%2), %%mm3 ;\n"
- " pxor 16(%4), %%mm2 ;\n"
- " movq %%mm1, 8(%1) ;\n"
- " movq 40(%1), %%mm5 ;\n"
- " pxor 32(%2), %%mm4 ;\n"
- " pxor 24(%3), %%mm3 ;\n"
- " movq %%mm2, 16(%1) ;\n"
- " pxor 40(%2), %%mm5 ;\n"
- " pxor 32(%3), %%mm4 ;\n"
- " pxor 24(%4), %%mm3 ;\n"
- " movq %%mm3, 24(%1) ;\n"
- " movq 56(%1), %%mm7 ;\n"
- " movq 48(%1), %%mm6 ;\n"
- " pxor 40(%3), %%mm5 ;\n"
- " pxor 32(%4), %%mm4 ;\n"
- " pxor 48(%2), %%mm6 ;\n"
- " movq %%mm4, 32(%1) ;\n"
- " pxor 56(%2), %%mm7 ;\n"
- " pxor 40(%4), %%mm5 ;\n"
- " pxor 48(%3), %%mm6 ;\n"
- " pxor 56(%3), %%mm7 ;\n"
- " movq %%mm5, 40(%1) ;\n"
- " pxor 48(%4), %%mm6 ;\n"
- " pxor 56(%4), %%mm7 ;\n"
- " movq %%mm6, 48(%1) ;\n"
- " movq %%mm7, 56(%1) ;\n"
-
- " addl $64, %1 ;\n"
- " addl $64, %2 ;\n"
- " addl $64, %3 ;\n"
- " addl $64, %4 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
- :
- : "memory");
-
- FPU_RESTORE;
-}
-
-static void
-xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
- unsigned long lines = bytes >> 6;
- char fpu_save[108];
-
- FPU_SAVE;
-
- /* need to save p4/p5 manually to not exceed gcc's 10 argument limit */
- __asm__ __volatile__ (
- " pushl %4\n"
- " pushl %5\n"
- " .align 32,0x90 ;\n"
- " 1: ;\n"
- " movq (%1), %%mm0 ;\n"
- " movq 8(%1), %%mm1 ;\n"
- " pxor (%2), %%mm0 ;\n"
- " pxor 8(%2), %%mm1 ;\n"
- " movq 16(%1), %%mm2 ;\n"
- " pxor (%3), %%mm0 ;\n"
- " pxor 8(%3), %%mm1 ;\n"
- " pxor 16(%2), %%mm2 ;\n"
- " pxor (%4), %%mm0 ;\n"
- " pxor 8(%4), %%mm1 ;\n"
- " pxor 16(%3), %%mm2 ;\n"
- " movq 24(%1), %%mm3 ;\n"
- " pxor (%5), %%mm0 ;\n"
- " pxor 8(%5), %%mm1 ;\n"
- " movq %%mm0, (%1) ;\n"
- " pxor 16(%4), %%mm2 ;\n"
- " pxor 24(%2), %%mm3 ;\n"
- " movq %%mm1, 8(%1) ;\n"
- " pxor 16(%5), %%mm2 ;\n"
- " pxor 24(%3), %%mm3 ;\n"
- " movq 32(%1), %%mm4 ;\n"
- " movq %%mm2, 16(%1) ;\n"
- " pxor 24(%4), %%mm3 ;\n"
- " pxor 32(%2), %%mm4 ;\n"
- " movq 40(%1), %%mm5 ;\n"
- " pxor 24(%5), %%mm3 ;\n"
- " pxor 32(%3), %%mm4 ;\n"
- " pxor 40(%2), %%mm5 ;\n"
- " movq %%mm3, 24(%1) ;\n"
- " pxor 32(%4), %%mm4 ;\n"
- " pxor 40(%3), %%mm5 ;\n"
- " movq 48(%1), %%mm6 ;\n"
- " movq 56(%1), %%mm7 ;\n"
- " pxor 32(%5), %%mm4 ;\n"
- " pxor 40(%4), %%mm5 ;\n"
- " pxor 48(%2), %%mm6 ;\n"
- " pxor 56(%2), %%mm7 ;\n"
- " movq %%mm4, 32(%1) ;\n"
- " pxor 48(%3), %%mm6 ;\n"
- " pxor 56(%3), %%mm7 ;\n"
- " pxor 40(%5), %%mm5 ;\n"
- " pxor 48(%4), %%mm6 ;\n"
- " pxor 56(%4), %%mm7 ;\n"
- " movq %%mm5, 40(%1) ;\n"
- " pxor 48(%5), %%mm6 ;\n"
- " pxor 56(%5), %%mm7 ;\n"
- " movq %%mm6, 48(%1) ;\n"
- " movq %%mm7, 56(%1) ;\n"
-
- " addl $64, %1 ;\n"
- " addl $64, %2 ;\n"
- " addl $64, %3 ;\n"
- " addl $64, %4 ;\n"
- " addl $64, %5 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- " popl %5\n"
- " popl %4\n"
- : "+g" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- : "r" (p4), "r" (p5)
- : "memory");
-
- FPU_RESTORE;
-}
-
-static struct xor_block_template xor_block_pII_mmx = {
- name: "pII_mmx",
- do_2: xor_pII_mmx_2,
- do_3: xor_pII_mmx_3,
- do_4: xor_pII_mmx_4,
- do_5: xor_pII_mmx_5,
-};
-
-static struct xor_block_template xor_block_p5_mmx = {
- name: "p5_mmx",
- do_2: xor_p5_mmx_2,
- do_3: xor_p5_mmx_3,
- do_4: xor_p5_mmx_4,
- do_5: xor_p5_mmx_5,
-};
-
-#undef FPU_SAVE
-#undef FPU_RESTORE
-
-/*
- * Cache-avoiding checksumming functions utilizing KNI instructions.
- * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
- */
-
-#define XMMS_SAVE \
- if (!(current->flags & PF_USEDFPU)) \
- clts(); \
- __asm__ __volatile__ ( \
- "movups %%xmm0,(%1) ;\n\t" \
- "movups %%xmm1,0x10(%1) ;\n\t" \
- "movups %%xmm2,0x20(%1) ;\n\t" \
- "movups %%xmm3,0x30(%1) ;\n\t" \
- : "=&r" (cr0) \
- : "r" (xmm_save) \
- : "memory")
-
-#define XMMS_RESTORE \
- __asm__ __volatile__ ( \
- "sfence ;\n\t" \
- "movups (%1),%%xmm0 ;\n\t" \
- "movups 0x10(%1),%%xmm1 ;\n\t" \
- "movups 0x20(%1),%%xmm2 ;\n\t" \
- "movups 0x30(%1),%%xmm3 ;\n\t" \
- : \
- : "r" (cr0), "r" (xmm_save) \
- : "memory"); \
- if (!(current->flags & PF_USEDFPU)) \
- stts()
-
-#define ALIGN16 __attribute__((aligned(16)))
-
-#define OFFS(x) "16*("#x")"
-#define PF_OFFS(x) "256+16*("#x")"
-#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n"
-#define LD(x,y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n"
-#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n"
-#define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n"
-#define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n"
-#define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n"
-#define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n"
-#define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n"
-#define XO1(x,y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n"
-#define XO2(x,y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n"
-#define XO3(x,y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n"
-#define XO4(x,y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n"
-#define XO5(x,y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n"
-
-
-static void
-xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- PF1(i) \
- PF1(i+2) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF0(i+4) \
- PF0(i+6) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2)
- :
- : "memory");
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i+2) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF2(i) \
- PF2(i+2) \
- PF0(i+4) \
- PF0(i+6) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r"(p2), "+r"(p3)
- :
- : "memory" );
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i+2) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF2(i) \
- PF2(i+2) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- PF3(i) \
- PF3(i+2) \
- PF0(i+4) \
- PF0(i+6) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " addl $256, %4 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
- :
- : "memory" );
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- /* need to save p4/p5 manually to not exceed gcc's 10 argument limit */
- __asm__ __volatile__ (
- " pushl %4\n"
- " pushl %5\n"
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i+2) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF2(i) \
- PF2(i+2) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- PF3(i) \
- PF3(i+2) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- PF4(i) \
- PF4(i+2) \
- PF0(i+4) \
- PF0(i+6) \
- XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
- XO4(i,0) \
- XO4(i+1,1) \
- XO4(i+2,2) \
- XO4(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " addl $256, %4 ;\n"
- " addl $256, %5 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- " popl %5\n"
- " popl %4\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- : "r" (p4), "r" (p5)
- : "memory");
-
- XMMS_RESTORE;
-}
-
-static struct xor_block_template xor_block_pIII_sse = {
- name: "pIII_sse",
- do_2: xor_sse_2,
- do_3: xor_sse_3,
- do_4: xor_sse_4,
- do_5: xor_sse_5,
-};
-
-/* Also try the generic routines. */
-#include <asm-generic/xor.h>
-
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES \
- do { \
- xor_speed(&xor_block_8regs); \
- xor_speed(&xor_block_32regs); \
- if (cpu_has_xmm) \
- xor_speed(&xor_block_pIII_sse); \
- if (md_cpu_has_mmx()) { \
- xor_speed(&xor_block_pII_mmx); \
- xor_speed(&xor_block_p5_mmx); \
- } \
- } while (0)
-
-/* We force the use of the SSE xor block because it can write around the L2
-   cache. We may also be able to load into the L1 cache only, depending on
-   how the CPU deals with a load to a line that is being prefetched. */
-#define XOR_SELECT_TEMPLATE(FASTEST) \
- (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
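For reference, the operation every template in this file accelerates, written as plain C; the generic 8regs routine pulled in from asm-generic/xor.h is essentially this loop unrolled:

    static void
    xor_ref_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
    {
        unsigned long lines = bytes / (8 * sizeof(unsigned long));

        while (lines--) {
            int i;
            for (i = 0; i < 8; i++)   /* eight words per "line" */
                p1[i] ^= p2[i];
            p1 += 8;
            p2 += 8;
        }
    }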