Diffstat (limited to 'xenolinux-2.4.21-pre4-sparse/arch')
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_block.c   |    4
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_ide.c     |   18
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_scsi.c    |   22
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_segment.c |    4
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/network/network.c  |    8
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/init_task.c         |   33
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/ptrace.c            |  454
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/semaphore.c         |  291
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/sys_i386.c          |  256
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/checksum.S             |  496
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/dec_and_lock.c         |   40
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/getuser.S              |   73
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/iodebug.c              |   19
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/memcpy.c               |   19
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/mmx.c                  |  399
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/old-checksum.c         |   19
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/strstr.c               |   31
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/usercopy.c             |  190
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/mm/extable.c               |   62
-rw-r--r--  xenolinux-2.4.21-pre4-sparse/arch/xeno/mm/pageattr.c              |  175
20 files changed, 31 insertions(+), 2582 deletions(-)
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_block.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_block.c
index bfdfa50c3e..6fd44f1cc1 100644
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_block.c
+++ b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_block.c
@@ -522,8 +522,8 @@ int __init xlblk_init(void)
blk_ring = (blk_ring_t *)fix_to_virt(FIX_BLKRING_BASE);
blk_ring->req_prod = blk_ring->resp_prod = resp_cons = req_prod = 0;
- error = request_irq(XLBLK_RESPONSE_IRQ, xlblk_response_int, 0,
- "xlblk-response", NULL);
+ error = request_irq(XLBLK_RESPONSE_IRQ, xlblk_response_int,
+ SA_SAMPLE_RANDOM, "xlblk-response", NULL);
if ( error )
{
printk(KERN_ALERT "Could not allocate receive interrupt\n");
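
The only change in this hunk is the request_irq() flags argument: 0 becomes SA_SAMPLE_RANDOM, which on 2.4-era kernels asks the IRQ layer to mix the interrupt's arrival timing into the kernel entropy pool. A minimal sketch of the 2.4 calling convention, reusing the driver's own IRQ and handler names (2.4 handlers return void):

    /* Sketch only: names are the driver's; request_irq() is the 2.4 API. */
    static void xlblk_response_int(int irq, void *dev_id, struct pt_regs *regs)
    {
        /* drain block-ring responses here */
    }

    static int __init example_init(void)       /* hypothetical wrapper */
    {
        int error = request_irq(XLBLK_RESPONSE_IRQ, xlblk_response_int,
                                SA_SAMPLE_RANDOM, /* feed IRQ timing to the entropy pool */
                                "xlblk-response", NULL /* dev_id */);
        if (error)
            printk(KERN_ALERT "Could not allocate receive interrupt\n");
        return error;
    }

Virtual devices have few other entropy sources, which is presumably why these drivers opt in; the network driver below gets the same flag change.
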
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_ide.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_ide.c
index 70990167e3..5e96f4197b 100644
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_ide.c
+++ b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_ide.c
@@ -18,7 +18,7 @@ static int xlide_blksize_size[XLIDE_MAX];
static int xlide_hardsect_size[XLIDE_MAX];
static int xlide_max_sectors[XLIDE_MAX];
-struct gendisk *xlide_gendisk;
+struct gendisk *xlide_gendisk = NULL;
static struct block_device_operations xlide_block_fops =
{
@@ -41,6 +41,13 @@ int xlide_init(xen_disk_info_t *xdi)
int i, result, units, minors, disk;
struct gendisk *gd;
+ /* If we don't have any usable IDE devices we may as well bail now. */
+ units = 0;
+ for ( i = 0; i < xdi->count; i++ )
+ if ( IS_IDE_XENDEV(xdi->disks[i].device) &&
+ ((xdi->disks[i].device & XENDEV_IDX_MASK) < 16) ) units++;
+ if ( units == 0 ) return 0;
+
SET_MODULE_OWNER(&xlide_block_fops);
result = register_blkdev(XLIDE_MAJOR, XLIDE_MAJOR_NAME,
@@ -73,13 +80,6 @@ int xlide_init(xen_disk_info_t *xdi)
*/
blk_queue_headactive(BLK_DEFAULT_QUEUE(XLIDE_MAJOR), 0);
- /* If we don't have any usable IDE devices we may as well bail now. */
- units = 0;
- for ( i = 0; i < xdi->count; i++ )
- if ( IS_IDE_XENDEV(xdi->disks[i].device) &&
- ((xdi->disks[i].device & XENDEV_IDX_MASK) < 16) ) units++;
- if ( units == 0 ) return 0;
-
/* We may register up to 16 devices in a sparse identifier space. */
units = 16;
@@ -133,6 +133,8 @@ int xlide_init(xen_disk_info_t *xdi)
void xlide_cleanup(void)
{
+ if ( xlide_gendisk == NULL ) return;
+
blk_cleanup_queue(BLK_DEFAULT_QUEUE(XLIDE_MAJOR));
xlide_gendisk = NULL;
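
Two coordinated fixes in xl_ide.c: the "no usable devices" check moves ahead of register_blkdev() and the queue setup, so a domain with no IDE devices no longer registers the major and then returns without unregistering it; and xlide_gendisk is now explicitly NULL until init succeeds, with xlide_cleanup() bailing on NULL so teardown is safe when init bailed early. Reduced to a sketch (count_ide_units() is a hypothetical stand-in for the inline counting loop):

    int xlide_init(xen_disk_info_t *xdi)
    {
        if ( count_ide_units(xdi) == 0 )
            return 0;                 /* bail before claiming any resources */
        /* ... register_blkdev(), blk_init_queue(), set up xlide_gendisk ... */
        return 0;
    }

    void xlide_cleanup(void)
    {
        if ( xlide_gendisk == NULL )  /* init bailed out: nothing to undo */
            return;
        blk_cleanup_queue(BLK_DEFAULT_QUEUE(XLIDE_MAJOR));
        xlide_gendisk = NULL;
        /* ... unregister_blkdev() ... */
    }

The SCSI and virtual-segment drivers below receive the same treatment.
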
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_scsi.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_scsi.c
index 75e379f1ba..e4d1007575 100644
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_scsi.c
+++ b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_scsi.c
@@ -42,6 +42,13 @@ int xlscsi_init(xen_disk_info_t *xdi)
int i, result, units, minors, disk;
struct gendisk *gd;
+ /* If we don't have any usable SCSI devices we may as well bail now. */
+ units = 0;
+ for ( i = 0; i < xdi->count; i++ )
+ if ( IS_SCSI_XENDEV(xdi->disks[i].device) &&
+ ((xdi->disks[i].device & XENDEV_IDX_MASK) < 16) ) units++;
+ if ( units == 0 ) return 0;
+
SET_MODULE_OWNER(&xlscsi_block_fops);
result = register_blkdev(XLSCSI_MAJOR, XLSCSI_MAJOR_NAME,
@@ -55,16 +62,16 @@ int xlscsi_init(xen_disk_info_t *xdi)
/* Initialize global arrays. */
for ( i = 0; i < XLSCSI_MAX; i++ )
{
- xlscsi_blksize_size[i] = 512;
+ xlscsi_blksize_size[i] = 1024; //XXX 512;
xlscsi_hardsect_size[i] = 512;
- xlscsi_max_sectors[i] = 128;
+ xlscsi_max_sectors[i] = 128*8; //XXX 128
}
blk_size[XLSCSI_MAJOR] = NULL;
blksize_size[XLSCSI_MAJOR] = xlscsi_blksize_size;
hardsect_size[XLSCSI_MAJOR] = xlscsi_hardsect_size;
max_sectors[XLSCSI_MAJOR] = xlscsi_max_sectors;
- read_ahead[XLSCSI_MAJOR] = 8;
+ read_ahead[XLSCSI_MAJOR] = NULL; //XXX8;
blk_init_queue(BLK_DEFAULT_QUEUE(XLSCSI_MAJOR), do_xlblk_request);
@@ -74,13 +81,6 @@ int xlscsi_init(xen_disk_info_t *xdi)
*/
blk_queue_headactive(BLK_DEFAULT_QUEUE(XLSCSI_MAJOR), 0);
- /* If we don't have any usable SCSI devices we may as well bail now. */
- units = 0;
- for ( i = 0; i < xdi->count; i++ )
- if ( IS_SCSI_XENDEV(xdi->disks[i].device) &&
- ((xdi->disks[i].device & XENDEV_IDX_MASK) < 16) ) units++;
- if ( units == 0 ) return 0;
-
/* We may register up to 16 devices in a sparse identifier space. */
units = 16;
@@ -134,6 +134,8 @@ int xlscsi_init(xen_disk_info_t *xdi)
void xlscsi_cleanup(void)
{
+ if ( xlscsi_gendisk == NULL ) return;
+
blk_cleanup_queue(BLK_DEFAULT_QUEUE(XLSCSI_MAJOR));
xlscsi_gendisk = NULL;
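
Besides the same early-bail and guarded-cleanup changes, this hunk carries XXX-marked tuning experiments against the 2.4 block layer's per-major arrays: the soft block size doubles from 512 to 1024 bytes, the per-request sector cap grows eightfold, and read_ahead is cleared (read_ahead[] holds ints in 2.4, so the NULL assignment stores 0 and disables read-ahead for the major). A sketch of how a 2.4 driver publishes these tunables, using the experimental values above (they are measurements in progress, not recommendations):

    static int xlscsi_blksize_size[XLSCSI_MAX];
    static int xlscsi_hardsect_size[XLSCSI_MAX];
    static int xlscsi_max_sectors[XLSCSI_MAX];

    static void xlscsi_tune(void)              /* hypothetical helper */
    {
        int i;
        for ( i = 0; i < XLSCSI_MAX; i++ )
        {
            xlscsi_blksize_size[i]  = 1024;    /* soft block size, bytes (was 512) */
            xlscsi_hardsect_size[i] = 512;     /* device sector size */
            xlscsi_max_sectors[i]   = 128 * 8; /* sectors per request (was 128) */
        }
        blksize_size[XLSCSI_MAJOR]  = xlscsi_blksize_size;
        hardsect_size[XLSCSI_MAJOR] = xlscsi_hardsect_size;
        max_sectors[XLSCSI_MAJOR]   = xlscsi_max_sectors;
        read_ahead[XLSCSI_MAJOR]    = 0;       /* what the NULL above amounts to */
    }
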
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_segment.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_segment.c
index 8626a36569..6ef1838a2b 100644
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_segment.c
+++ b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_segment.c
@@ -21,7 +21,7 @@ static int xlseg_blksize_size[XLVIRT_MAX];
static int xlseg_hardsect_size[XLVIRT_MAX];
static int xlseg_max_sectors[XLVIRT_MAX];
-struct gendisk *xlsegment_gendisk;
+struct gendisk *xlsegment_gendisk = NULL;
static xen_disk_info_t xlseg_disk_info;
@@ -148,6 +148,8 @@ int __init xlseg_init(void)
static void __exit xlseg_cleanup(void)
{
+ if ( xlsegment_gendisk == NULL ) return;
+
blk_cleanup_queue(BLK_DEFAULT_QUEUE(XLVIRT_MAJOR));
xlsegment_gendisk = NULL;
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/network/network.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/network/network.c
index 7e85bf5abe..86bbeb921a 100644
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/network/network.c
+++ b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/network/network.c
@@ -116,8 +116,8 @@ static int network_open(struct net_device *dev)
network_alloc_rx_buffers(dev);
- error = request_irq(NET_RX_IRQ, network_rx_int, 0,
- "net-rx", dev);
+ error = request_irq(NET_RX_IRQ, network_rx_int,
+ SA_SAMPLE_RANDOM, "net-rx", dev);
if ( error )
{
printk(KERN_WARNING "%s: Could not allocate receive interrupt\n",
@@ -126,8 +126,8 @@ static int network_open(struct net_device *dev)
goto fail;
}
- error = request_irq(NET_TX_IRQ, network_tx_int, 0,
- "net-tx", dev);
+ error = request_irq(NET_TX_IRQ, network_tx_int,
+ SA_SAMPLE_RANDOM, "net-tx", dev);
if ( error )
{
printk(KERN_WARNING "%s: Could not allocate transmit interrupt\n",
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/init_task.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/init_task.c
deleted file mode 100644
index 7779809ef2..0000000000
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/init_task.c
+++ /dev/null
@@ -1,33 +0,0 @@
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/desc.h>
-
-static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS;
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-/*
- * Initial task structure.
- *
- * We need to make sure that this is 8192-byte aligned due to the
- * way process stacks are handled. This is done by having a special
- * "init_task" linker map entry..
- */
-union task_union init_task_union
- __attribute__((__section__(".data.init_task"))) =
- { INIT_TASK(init_task_union.task) };
-
-/*
- * per-CPU TSS segments. Threads are completely 'soft' on Linux,
- * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
- * section. Since TSS's are completely CPU-local, we want them
- * on exact cacheline boundaries, to eliminate cacheline ping-pong.
- */
-struct tss_struct init_tss[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = INIT_TSS };
-
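
init_task.c is the first of several files deleted outright. Since a sparse tree carries only files that differ from the pristine 2.4.21-pre4 base, deleting them reverts those paths to the stock i386 versions, suggesting these copies no longer needed Xen-specific changes. The 8192-byte alignment the comment insists on matters because 2.4 recovers the current task by masking the kernel stack pointer; a sketch of that mechanism (this is the standard i386 get_current of the era):

    /* task_struct and kernel stack share one 8 KB region, so masking
     * %esp with ~8191 lands on the task_struct at the region's base. */
    static inline struct task_struct *get_current(void)
    {
        struct task_struct *current;
        __asm__("andl %%esp,%0" : "=r" (current) : "0" (~8191UL));
        return current;
    }
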
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/ptrace.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/ptrace.c
deleted file mode 100644
index b0ba97d198..0000000000
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/ptrace.c
+++ /dev/null
@@ -1,454 +0,0 @@
-/* ptrace.c */
-/* By Ross Biro 1/23/92 */
-/*
- * Pentium III FXSR, SSE support
- * Gareth Hughes <gareth@valinux.com>, May 2000
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/errno.h>
-#include <linux/ptrace.h>
-#include <linux/user.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/system.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/debugreg.h>
-
-/*
- * does not yet catch signals sent when the child dies.
- * in exit.c or in signal.c.
- */
-
-/* determines which flags the user has access to. */
-/* 1 = access 0 = no access */
-#define FLAG_MASK 0x00044dd5
-
-/* set's the trap flag. */
-#define TRAP_FLAG 0x100
-
-/*
- * Offset of eflags on child stack..
- */
-#define EFL_OFFSET ((EFL-2)*4-sizeof(struct pt_regs))
-
-/*
- * this routine will get a word off of the processes privileged stack.
- * the offset is how far from the base addr as stored in the TSS.
- * this routine assumes that all the privileged stacks are in our
- * data space.
- */
-static inline int get_stack_long(struct task_struct *task, int offset)
-{
- unsigned char *stack;
-
- stack = (unsigned char *)task->thread.esp0;
- stack += offset;
- return (*((int *)stack));
-}
-
-/*
- * this routine will put a word on the processes privileged stack.
- * the offset is how far from the base addr as stored in the TSS.
- * this routine assumes that all the privileged stacks are in our
- * data space.
- */
-static inline int put_stack_long(struct task_struct *task, int offset,
- unsigned long data)
-{
- unsigned char * stack;
-
- stack = (unsigned char *) task->thread.esp0;
- stack += offset;
- *(unsigned long *) stack = data;
- return 0;
-}
-
-static int putreg(struct task_struct *child,
- unsigned long regno, unsigned long value)
-{
- switch (regno >> 2) {
- case FS:
- if (value && (value & 3) != 3)
- return -EIO;
- child->thread.fs = value;
- return 0;
- case GS:
- if (value && (value & 3) != 3)
- return -EIO;
- child->thread.gs = value;
- return 0;
- case DS:
- case ES:
- if (value && (value & 3) != 3)
- return -EIO;
- value &= 0xffff;
- break;
- case SS:
- case CS:
- if ((value & 3) != 3)
- return -EIO;
- value &= 0xffff;
- break;
- case EFL:
- value &= FLAG_MASK;
- value |= get_stack_long(child, EFL_OFFSET) & ~FLAG_MASK;
- break;
- }
- if (regno > GS*4)
- regno -= 2*4;
- put_stack_long(child, regno - sizeof(struct pt_regs), value);
- return 0;
-}
-
-static unsigned long getreg(struct task_struct *child,
- unsigned long regno)
-{
- unsigned long retval = ~0UL;
-
- switch (regno >> 2) {
- case FS:
- retval = child->thread.fs;
- break;
- case GS:
- retval = child->thread.gs;
- break;
- case DS:
- case ES:
- case SS:
- case CS:
- retval = 0xffff;
- /* fall through */
- default:
- if (regno > GS*4)
- regno -= 2*4;
- regno = regno - sizeof(struct pt_regs);
- retval &= get_stack_long(child, regno);
- }
- return retval;
-}
-
-/*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure the single step bit is not set.
- */
-void ptrace_disable(struct task_struct *child)
-{
- long tmp;
-
- tmp = get_stack_long(child, EFL_OFFSET) & ~TRAP_FLAG;
- put_stack_long(child, EFL_OFFSET, tmp);
-}
-
-asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
-{
- struct task_struct *child;
- struct user * dummy = NULL;
- int i, ret;
-
- lock_kernel();
- ret = -EPERM;
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
-
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
-
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_tsk;
-
- switch (request) {
- /* when I and D space are separate, these will need to be fixed. */
- case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA: {
- unsigned long tmp;
- int copied;
-
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
- if (copied != sizeof(tmp))
- break;
- ret = put_user(tmp,(unsigned long *) data);
- break;
- }
-
- /* read the word at location addr in the USER area. */
- case PTRACE_PEEKUSR: {
- unsigned long tmp;
-
- ret = -EIO;
- if ((addr & 3) || addr < 0 ||
- addr > sizeof(struct user) - 3)
- break;
-
- tmp = 0; /* Default return condition */
- if(addr < FRAME_SIZE*sizeof(long))
- tmp = getreg(child, addr);
- if(addr >= (long) &dummy->u_debugreg[0] &&
- addr <= (long) &dummy->u_debugreg[7]){
- addr -= (long) &dummy->u_debugreg[0];
- addr = addr >> 2;
- tmp = child->thread.debugreg[addr];
- }
- ret = put_user(tmp,(unsigned long *) data);
- break;
- }
-
- /* when I and D space are separate, this will have to be fixed. */
- case PTRACE_POKETEXT: /* write the word at location addr. */
- case PTRACE_POKEDATA:
- ret = 0;
- if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
- break;
- ret = -EIO;
- break;
-
- case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
- ret = -EIO;
- if ((addr & 3) || addr < 0 ||
- addr > sizeof(struct user) - 3)
- break;
-
- if (addr < FRAME_SIZE*sizeof(long)) {
- ret = putreg(child, addr, data);
- break;
- }
- /* We need to be very careful here. We implicitly
- want to modify a portion of the task_struct, and we
- have to be selective about what portions we allow someone
- to modify. */
-
- ret = -EIO;
- if(addr >= (long) &dummy->u_debugreg[0] &&
- addr <= (long) &dummy->u_debugreg[7]){
-
- if(addr == (long) &dummy->u_debugreg[4]) break;
- if(addr == (long) &dummy->u_debugreg[5]) break;
- if(addr < (long) &dummy->u_debugreg[4] &&
- ((unsigned long) data) >= TASK_SIZE-3) break;
-
- if(addr == (long) &dummy->u_debugreg[7]) {
- data &= ~DR_CONTROL_RESERVED;
- for(i=0; i<4; i++)
- if ((0x5f54 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
- goto out_tsk;
- }
-
- addr -= (long) &dummy->u_debugreg;
- addr = addr >> 2;
- child->thread.debugreg[addr] = data;
- ret = 0;
- }
- break;
-
- case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
- case PTRACE_CONT: { /* restart after signal. */
- long tmp;
-
- ret = -EIO;
- if ((unsigned long) data > _NSIG)
- break;
- if (request == PTRACE_SYSCALL)
- child->ptrace |= PT_TRACESYS;
- else
- child->ptrace &= ~PT_TRACESYS;
- child->exit_code = data;
- /* make sure the single step bit is not set. */
- tmp = get_stack_long(child, EFL_OFFSET) & ~TRAP_FLAG;
- put_stack_long(child, EFL_OFFSET,tmp);
- wake_up_process(child);
- ret = 0;
- break;
- }
-
-/*
- * make the child exit. Best I can do is send it a sigkill.
- * perhaps it should be put in the status that it wants to
- * exit.
- */
- case PTRACE_KILL: {
- long tmp;
-
- ret = 0;
- if (child->state == TASK_ZOMBIE) /* already dead */
- break;
- child->exit_code = SIGKILL;
- /* make sure the single step bit is not set. */
- tmp = get_stack_long(child, EFL_OFFSET) & ~TRAP_FLAG;
- put_stack_long(child, EFL_OFFSET, tmp);
- wake_up_process(child);
- break;
- }
-
- case PTRACE_SINGLESTEP: { /* set the trap flag. */
- long tmp;
-
- ret = -EIO;
- if ((unsigned long) data > _NSIG)
- break;
- child->ptrace &= ~PT_TRACESYS;
- if ((child->ptrace & PT_DTRACE) == 0) {
- /* Spurious delayed TF traps may occur */
- child->ptrace |= PT_DTRACE;
- }
- tmp = get_stack_long(child, EFL_OFFSET) | TRAP_FLAG;
- put_stack_long(child, EFL_OFFSET, tmp);
- child->exit_code = data;
- /* give it a chance to run. */
- wake_up_process(child);
- ret = 0;
- break;
- }
-
- case PTRACE_DETACH:
- /* detach a process that was attached. */
- ret = ptrace_detach(child, data);
- break;
-
- case PTRACE_GETREGS: { /* Get all gp regs from the child. */
- if (!access_ok(VERIFY_WRITE, (unsigned *)data, FRAME_SIZE*sizeof(long))) {
- ret = -EIO;
- break;
- }
- for ( i = 0; i < FRAME_SIZE*sizeof(long); i += sizeof(long) ) {
- __put_user(getreg(child, i),(unsigned long *) data);
- data += sizeof(long);
- }
- ret = 0;
- break;
- }
-
- case PTRACE_SETREGS: { /* Set all gp regs in the child. */
- unsigned long tmp;
- if (!access_ok(VERIFY_READ, (unsigned *)data, FRAME_SIZE*sizeof(long))) {
- ret = -EIO;
- break;
- }
- for ( i = 0; i < FRAME_SIZE*sizeof(long); i += sizeof(long) ) {
- __get_user(tmp, (unsigned long *) data);
- putreg(child, i, tmp);
- data += sizeof(long);
- }
- ret = 0;
- break;
- }
-
- case PTRACE_GETFPREGS: { /* Get the child FPU state. */
- if (!access_ok(VERIFY_WRITE, (unsigned *)data,
- sizeof(struct user_i387_struct))) {
- ret = -EIO;
- break;
- }
- ret = 0;
- if ( !child->used_math )
- load_empty_fpu(child);
- get_fpregs((struct user_i387_struct *)data, child);
- break;
- }
-
- case PTRACE_SETFPREGS: { /* Set the child FPU state. */
- if (!access_ok(VERIFY_READ, (unsigned *)data,
- sizeof(struct user_i387_struct))) {
- ret = -EIO;
- break;
- }
- child->used_math = 1;
- set_fpregs(child, (struct user_i387_struct *)data);
- ret = 0;
- break;
- }
-
- case PTRACE_GETFPXREGS: { /* Get the child extended FPU state. */
- if (!access_ok(VERIFY_WRITE, (unsigned *)data,
- sizeof(struct user_fxsr_struct))) {
- ret = -EIO;
- break;
- }
- if ( !child->used_math )
- load_empty_fpu(child);
- ret = get_fpxregs((struct user_fxsr_struct *)data, child);
- break;
- }
-
- case PTRACE_SETFPXREGS: { /* Set the child extended FPU state. */
- if (!access_ok(VERIFY_READ, (unsigned *)data,
- sizeof(struct user_fxsr_struct))) {
- ret = -EIO;
- break;
- }
- child->used_math = 1;
- ret = set_fpxregs(child, (struct user_fxsr_struct *)data);
- break;
- }
-
- case PTRACE_SETOPTIONS: {
- if (data & PTRACE_O_TRACESYSGOOD)
- child->ptrace |= PT_TRACESYSGOOD;
- else
- child->ptrace &= ~PT_TRACESYSGOOD;
- ret = 0;
- break;
- }
-
- default:
- ret = -EIO;
- break;
- }
-out_tsk:
- free_task_struct(child);
-out:
- unlock_kernel();
- return ret;
-}
-
-asmlinkage void syscall_trace(void)
-{
- if ((current->ptrace & (PT_PTRACED|PT_TRACESYS)) !=
- (PT_PTRACED|PT_TRACESYS))
- return;
- /* the 0x80 provides a way for the tracing parent to distinguish
- between a syscall stop and SIGTRAP delivery */
- current->exit_code = SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
- ? 0x80 : 0);
- current->state = TASK_STOPPED;
- notify_parent(current, SIGCHLD);
- schedule();
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
-}
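
The deleted ptrace.c matches the stock i386 implementation. Its USER-area requests (PTRACE_PEEKUSR/POKEUSR above) interpret addr as a byte offset into struct user, whose register file sits at offset zero; from userland the same path looks roughly like this (a sketch; glibc spells the request PTRACE_PEEKUSER, and errno-based error handling is elided):

    #include <stddef.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/user.h>

    static long read_tracee_eip(pid_t pid)
    {
        /* regs is the first member of struct user, so an offset into
         * user_regs_struct is also a valid USER-area offset. */
        return ptrace(PTRACE_PEEKUSER, pid,
                      (void *) offsetof(struct user_regs_struct, eip), 0);
    }
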
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/semaphore.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/semaphore.c
deleted file mode 100644
index fd44981804..0000000000
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/semaphore.c
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * i386 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@redhat.com>
- */
-#include <linux/config.h>
-#include <linux/sched.h>
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is
- * protected by the semaphore spinlock.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- * - only on a boundary condition do we need to care. When we go
- * from a negative count to a non-negative, we wake people up.
- * - when we go from a non-negative count to a negative do we
- * (a) synchronize with the "sleeper" count and (b) make sure
- * that we're on the wakeup list before we synchronize so that
- * we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
- wake_up(&sem->wait);
-}
-
-static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
-
-void __down(struct semaphore * sem)
-{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
- tsk->state = TASK_UNINTERRUPTIBLE;
- add_wait_queue_exclusive(&sem->wait, &wait);
-
- spin_lock_irq(&semaphore_lock);
- sem->sleepers++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock.
- */
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- sem->sleepers = 0;
- break;
- }
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irq(&semaphore_lock);
-
- schedule();
- tsk->state = TASK_UNINTERRUPTIBLE;
- spin_lock_irq(&semaphore_lock);
- }
- spin_unlock_irq(&semaphore_lock);
- remove_wait_queue(&sem->wait, &wait);
- tsk->state = TASK_RUNNING;
- wake_up(&sem->wait);
-}
-
-int __down_interruptible(struct semaphore * sem)
-{
- int retval = 0;
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
- tsk->state = TASK_INTERRUPTIBLE;
- add_wait_queue_exclusive(&sem->wait, &wait);
-
- spin_lock_irq(&semaphore_lock);
- sem->sleepers ++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * With signals pending, this turns into
- * the trylock failure case - we won't be
- * sleeping, and we* can't get the lock as
- * it has contention. Just correct the count
- * and exit.
- */
- if (signal_pending(current)) {
- retval = -EINTR;
- sem->sleepers = 0;
- atomic_add(sleepers, &sem->count);
- break;
- }
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock. The
- * "-1" is because we're still hoping to get
- * the lock.
- */
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- sem->sleepers = 0;
- break;
- }
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irq(&semaphore_lock);
-
- schedule();
- tsk->state = TASK_INTERRUPTIBLE;
- spin_lock_irq(&semaphore_lock);
- }
- spin_unlock_irq(&semaphore_lock);
- tsk->state = TASK_RUNNING;
- remove_wait_queue(&sem->wait, &wait);
- wake_up(&sem->wait);
- return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore * sem)
-{
- int sleepers;
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_lock, flags);
- sleepers = sem->sleepers + 1;
- sem->sleepers = 0;
-
- /*
- * Add "everybody else" and us into it. They aren't
- * playing, because we own the spinlock.
- */
- if (!atomic_add_negative(sleepers, &sem->count))
- wake_up(&sem->wait);
-
- spin_unlock_irqrestore(&semaphore_lock, flags);
- return 1;
-}
-
-
-/*
- * The semaphore operations have a special calling sequence that
- * allow us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- *
- * %ecx contains the semaphore pointer on entry. Save the C-clobbered
- * registers (%eax, %edx and %ecx) except %eax when used as a return
- * value..
- */
-asm(
-".text\n"
-".align 4\n"
-".globl __down_failed\n"
-"__down_failed:\n\t"
-#if defined(CONFIG_FRAME_POINTER)
- "pushl %ebp\n\t"
- "movl %esp,%ebp\n\t"
-#endif
- "pushl %eax\n\t"
- "pushl %edx\n\t"
- "pushl %ecx\n\t"
- "call __down\n\t"
- "popl %ecx\n\t"
- "popl %edx\n\t"
- "popl %eax\n\t"
-#if defined(CONFIG_FRAME_POINTER)
- "movl %ebp,%esp\n\t"
- "popl %ebp\n\t"
-#endif
- "ret"
-);
-
-asm(
-".text\n"
-".align 4\n"
-".globl __down_failed_interruptible\n"
-"__down_failed_interruptible:\n\t"
-#if defined(CONFIG_FRAME_POINTER)
- "pushl %ebp\n\t"
- "movl %esp,%ebp\n\t"
-#endif
- "pushl %edx\n\t"
- "pushl %ecx\n\t"
- "call __down_interruptible\n\t"
- "popl %ecx\n\t"
- "popl %edx\n\t"
-#if defined(CONFIG_FRAME_POINTER)
- "movl %ebp,%esp\n\t"
- "popl %ebp\n\t"
-#endif
- "ret"
-);
-
-asm(
-".text\n"
-".align 4\n"
-".globl __down_failed_trylock\n"
-"__down_failed_trylock:\n\t"
-#if defined(CONFIG_FRAME_POINTER)
- "pushl %ebp\n\t"
- "movl %esp,%ebp\n\t"
-#endif
- "pushl %edx\n\t"
- "pushl %ecx\n\t"
- "call __down_trylock\n\t"
- "popl %ecx\n\t"
- "popl %edx\n\t"
-#if defined(CONFIG_FRAME_POINTER)
- "movl %ebp,%esp\n\t"
- "popl %ebp\n\t"
-#endif
- "ret"
-);
-
-asm(
-".text\n"
-".align 4\n"
-".globl __up_wakeup\n"
-"__up_wakeup:\n\t"
- "pushl %eax\n\t"
- "pushl %edx\n\t"
- "pushl %ecx\n\t"
- "call __up\n\t"
- "popl %ecx\n\t"
- "popl %edx\n\t"
- "popl %eax\n\t"
- "ret"
-);
-
-/*
- * rw spinlock fallbacks
- */
-#if defined(CONFIG_SMP)
-asm(
-".text\n"
-".align 4\n"
-".globl __write_lock_failed\n"
-"__write_lock_failed:\n\t"
- LOCK "addl $" RW_LOCK_BIAS_STR ",(%eax)\n"
-"1: rep; nop\n\t"
- "cmpl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
- "jne 1b\n\t"
- LOCK "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
- "jnz __write_lock_failed\n\t"
- "ret"
-);
-
-asm(
-".text\n"
-".align 4\n"
-".globl __read_lock_failed\n"
-"__read_lock_failed:\n\t"
- LOCK "incl (%eax)\n"
-"1: rep; nop\n\t"
- "cmpl $1,(%eax)\n\t"
- "js 1b\n\t"
- LOCK "decl (%eax)\n\t"
- "js __read_lock_failed\n\t"
- "ret"
-);
-#endif
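
semaphore.c (also stock i386) is only the contended slow path: as its comments say, the inline down()/up() in <asm/semaphore.h> handle the uncontended case with a single locked decrement or increment, branching to the __down_failed/__up_wakeup stubs above only when the count indicates contention. The caller-visible API is untouched by any of this (sketch):

    static struct semaphore sem;

    static void example(void)
    {
        sema_init(&sem, 1);  /* count of 1: a mutex */
        down(&sem);          /* inline fast path; sleeps in __down() if contended */
        /* ... critical section ... */
        up(&sem);            /* reaches __up() only when sleepers are waiting */
    }
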
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/sys_i386.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/sys_i386.c
deleted file mode 100644
index 5fd6910b9c..0000000000
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/sys_i386.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * linux/arch/i386/kernel/sys_i386.c
- *
- * This file contains various random system calls that
- * have a non-standard calling sequence on the Linux/i386
- * platform.
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/sem.h>
-#include <linux/msg.h>
-#include <linux/shm.h>
-#include <linux/stat.h>
-#include <linux/mman.h>
-#include <linux/file.h>
-#include <linux/utsname.h>
-
-#include <asm/uaccess.h>
-#include <asm/ipc.h>
-
-/*
- * sys_pipe() is the normal C calling standard for creating
- * a pipe. It's not the way Unix traditionally does this, though.
- */
-asmlinkage int sys_pipe(unsigned long * fildes)
-{
- int fd[2];
- int error;
-
- error = do_pipe(fd);
- if (!error) {
- if (copy_to_user(fildes, fd, 2*sizeof(int)))
- error = -EFAULT;
- }
- return error;
-}
-
-/* common code for old and new mmaps */
-static inline long do_mmap2(
- unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- int error = -EBADF;
- struct file * file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
-
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- return do_mmap2(addr, len, prot, flags, fd, pgoff);
-}
-
-/*
- * Perform the select(nd, in, out, ex, tv) and mmap() system
- * calls. Linux/i386 didn't use to be able to handle more than
- * 4 system call parameters, so these system calls used a memory
- * block for parameter passing..
- */
-
-struct mmap_arg_struct {
- unsigned long addr;
- unsigned long len;
- unsigned long prot;
- unsigned long flags;
- unsigned long fd;
- unsigned long offset;
-};
-
-asmlinkage int old_mmap(struct mmap_arg_struct *arg)
-{
- struct mmap_arg_struct a;
- int err = -EFAULT;
-
- if (copy_from_user(&a, arg, sizeof(a)))
- goto out;
-
- err = -EINVAL;
- if (a.offset & ~PAGE_MASK)
- goto out;
-
- err = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
-out:
- return err;
-}
-
-
-extern asmlinkage int sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
-
-struct sel_arg_struct {
- unsigned long n;
- fd_set *inp, *outp, *exp;
- struct timeval *tvp;
-};
-
-asmlinkage int old_select(struct sel_arg_struct *arg)
-{
- struct sel_arg_struct a;
-
- if (copy_from_user(&a, arg, sizeof(a)))
- return -EFAULT;
- /* sys_select() does the appropriate kernel locking */
- return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
-}
-
-/*
- * sys_ipc() is the de-multiplexer for the SysV IPC calls..
- *
- * This is really horribly ugly.
- */
-asmlinkage int sys_ipc (uint call, int first, int second,
- int third, void *ptr, long fifth)
-{
- int version, ret;
-
- version = call >> 16; /* hack for backward compatibility */
- call &= 0xffff;
-
- switch (call) {
- case SEMOP:
- return sys_semop (first, (struct sembuf *)ptr, second);
- case SEMGET:
- return sys_semget (first, second, third);
- case SEMCTL: {
- union semun fourth;
- if (!ptr)
- return -EINVAL;
- if (get_user(fourth.__pad, (void **) ptr))
- return -EFAULT;
- return sys_semctl (first, second, third, fourth);
- }
-
- case MSGSND:
- return sys_msgsnd (first, (struct msgbuf *) ptr,
- second, third);
- case MSGRCV:
- switch (version) {
- case 0: {
- struct ipc_kludge tmp;
- if (!ptr)
- return -EINVAL;
-
- if (copy_from_user(&tmp,
- (struct ipc_kludge *) ptr,
- sizeof (tmp)))
- return -EFAULT;
- return sys_msgrcv (first, tmp.msgp, second,
- tmp.msgtyp, third);
- }
- default:
- return sys_msgrcv (first,
- (struct msgbuf *) ptr,
- second, fifth, third);
- }
- case MSGGET:
- return sys_msgget ((key_t) first, second);
- case MSGCTL:
- return sys_msgctl (first, second, (struct msqid_ds *) ptr);
-
- case SHMAT:
- switch (version) {
- default: {
- ulong raddr;
- ret = sys_shmat (first, (char *) ptr, second, &raddr);
- if (ret)
- return ret;
- return put_user (raddr, (ulong *) third);
- }
- case 1: /* iBCS2 emulator entry point */
- if (!segment_eq(get_fs(), get_ds()))
- return -EINVAL;
- return sys_shmat (first, (char *) ptr, second, (ulong *) third);
- }
- case SHMDT:
- return sys_shmdt ((char *)ptr);
- case SHMGET:
- return sys_shmget (first, second, third);
- case SHMCTL:
- return sys_shmctl (first, second,
- (struct shmid_ds *) ptr);
- default:
- return -EINVAL;
- }
-}
-
-/*
- * Old cruft
- */
-asmlinkage int sys_uname(struct old_utsname * name)
-{
- int err;
- if (!name)
- return -EFAULT;
- down_read(&uts_sem);
- err=copy_to_user(name, &system_utsname, sizeof (*name));
- up_read(&uts_sem);
- return err?-EFAULT:0;
-}
-
-asmlinkage int sys_olduname(struct oldold_utsname * name)
-{
- int error;
-
- if (!name)
- return -EFAULT;
- if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
- return -EFAULT;
-
- down_read(&uts_sem);
-
- error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
- error |= __put_user(0,name->sysname+__OLD_UTS_LEN);
- error |= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
- error |= __put_user(0,name->nodename+__OLD_UTS_LEN);
- error |= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
- error |= __put_user(0,name->release+__OLD_UTS_LEN);
- error |= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
- error |= __put_user(0,name->version+__OLD_UTS_LEN);
- error |= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
- error |= __put_user(0,name->machine+__OLD_UTS_LEN);
-
- up_read(&uts_sem);
-
- error = error ? -EFAULT : 0;
-
- return error;
-}
-
-asmlinkage int sys_pause(void)
-{
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- return -ERESTARTNOHAND;
-}
-
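
sys_i386.c held the i386-specific syscall shims. The struct-based old_mmap() exists because, as its comment notes, early Linux/i386 could not pass more than four syscall arguments in registers, so mmap's six travel in one user-memory block. An i386-only userland sketch of that legacy entry point (mmap_arg_struct is re-declared to mirror the kernel definition above; modern code uses mmap2 instead):

    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    struct mmap_arg_struct {            /* mirrors the kernel struct above */
        unsigned long addr, len, prot, flags, fd, offset;
    };

    static void *legacy_mmap_4k(void)
    {
        struct mmap_arg_struct a = {
            .addr = 0, .len = 4096,
            .prot = PROT_READ | PROT_WRITE,
            .flags = MAP_PRIVATE | MAP_ANONYMOUS,
            .fd = (unsigned long) -1, .offset = 0,
        };
        return (void *) syscall(SYS_mmap, &a);   /* old mmap: one pointer argument */
    }
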
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/checksum.S b/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/checksum.S
deleted file mode 100644
index 94c7867ddc..0000000000
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/checksum.S
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * IP/TCP/UDP checksumming routines
- *
- * Authors: Jorge Cwik, <jorge@laser.satlink.net>
- * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
- * Tom May, <ftom@netcom.com>
- * Pentium Pro/II routines:
- * Alexander Kjeldaas <astor@guardian.no>
- * Finn Arne Gangstad <finnag@guardian.no>
- * Lots of code moved from tcp.c and ip.c; see those files
- * for more names.
- *
- * Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception
- * handling.
- * Andi Kleen, add zeroing on error
- * converted to pure assembler
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <asm/errno.h>
-
-/*
- * computes a partial checksum, e.g. for TCP/UDP fragments
- */
-
-/*
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
- */
-
-.text
-.align 4
-.globl csum_partial
-
-#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
-
- /*
- * Experiments with Ethernet and SLIP connections show that buff
- * is aligned on either a 2-byte or 4-byte boundary. We get at
- * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
- * Fortunately, it is easy to convert 2-byte alignment to 4-byte
- * alignment for the unrolled loop.
- */
-csum_partial:
- pushl %esi
- pushl %ebx
- movl 20(%esp),%eax # Function arg: unsigned int sum
- movl 16(%esp),%ecx # Function arg: int len
- movl 12(%esp),%esi # Function arg: unsigned char *buff
- testl $3, %esi # Check alignment.
- jz 2f # Jump if alignment is ok.
- testl $1, %esi # Check alignment.
- jz 10f # Jump if alignment is boundary of 2bytes.
-
- # buf is odd
- dec %ecx
- jl 8f
- movzbl (%esi), %ebx
- adcl %ebx, %eax
- roll $8, %eax
- inc %esi
- testl $2, %esi
- jz 2f
-10:
- subl $2, %ecx # Alignment uses up two bytes.
- jae 1f # Jump if we had at least two bytes.
- addl $2, %ecx # ecx was < 2. Deal with it.
- jmp 4f
-1: movw (%esi), %bx
- addl $2, %esi
- addw %bx, %ax
- adcl $0, %eax
-2:
- movl %ecx, %edx
- shrl $5, %ecx
- jz 2f
- testl %esi, %esi
-1: movl (%esi), %ebx
- adcl %ebx, %eax
- movl 4(%esi), %ebx
- adcl %ebx, %eax
- movl 8(%esi), %ebx
- adcl %ebx, %eax
- movl 12(%esi), %ebx
- adcl %ebx, %eax
- movl 16(%esi), %ebx
- adcl %ebx, %eax
- movl 20(%esi), %ebx
- adcl %ebx, %eax
- movl 24(%esi), %ebx
- adcl %ebx, %eax
- movl 28(%esi), %ebx
- adcl %ebx, %eax
- lea 32(%esi), %esi
- dec %ecx
- jne 1b
- adcl $0, %eax
-2: movl %edx, %ecx
- andl $0x1c, %edx
- je 4f
- shrl $2, %edx # This clears CF
-3: adcl (%esi), %eax
- lea 4(%esi), %esi
- dec %edx
- jne 3b
- adcl $0, %eax
-4: andl $3, %ecx
- jz 7f
- cmpl $2, %ecx
- jb 5f
- movw (%esi),%cx
- leal 2(%esi),%esi
- je 6f
- shll $16,%ecx
-5: movb (%esi),%cl
-6: addl %ecx,%eax
- adcl $0, %eax
-7:
- testl $1, 12(%esp)
- jz 8f
- roll $8, %eax
-8:
- popl %ebx
- popl %esi
- ret
-
-#else
-
-/* Version for PentiumII/PPro */
-
-csum_partial:
- pushl %esi
- pushl %ebx
- movl 20(%esp),%eax # Function arg: unsigned int sum
- movl 16(%esp),%ecx # Function arg: int len
- movl 12(%esp),%esi # Function arg: const unsigned char *buf
-
- testl $3, %esi
- jnz 25f
-10:
- movl %ecx, %edx
- movl %ecx, %ebx
- andl $0x7c, %ebx
- shrl $7, %ecx
- addl %ebx,%esi
- shrl $2, %ebx
- negl %ebx
- lea 45f(%ebx,%ebx,2), %ebx
- testl %esi, %esi
- jmp *%ebx
-
- # Handle 2-byte-aligned regions
-20: addw (%esi), %ax
- lea 2(%esi), %esi
- adcl $0, %eax
- jmp 10b
-25:
- testl $1, %esi
- jz 30f
- # buf is odd
- dec %ecx
- jl 90f
- movzbl (%esi), %ebx
- addl %ebx, %eax
- adcl $0, %eax
- roll $8, %eax
- inc %esi
- testl $2, %esi
- jz 10b
-
-30: subl $2, %ecx
- ja 20b
- je 32f
- addl $2, %ecx
- jz 80f
- movzbl (%esi),%ebx # csumming 1 byte, 2-aligned
- addl %ebx, %eax
- adcl $0, %eax
- jmp 80f
-32:
- addw (%esi), %ax # csumming 2 bytes, 2-aligned
- adcl $0, %eax
- jmp 80f
-
-40:
- addl -128(%esi), %eax
- adcl -124(%esi), %eax
- adcl -120(%esi), %eax
- adcl -116(%esi), %eax
- adcl -112(%esi), %eax
- adcl -108(%esi), %eax
- adcl -104(%esi), %eax
- adcl -100(%esi), %eax
- adcl -96(%esi), %eax
- adcl -92(%esi), %eax
- adcl -88(%esi), %eax
- adcl -84(%esi), %eax
- adcl -80(%esi), %eax
- adcl -76(%esi), %eax
- adcl -72(%esi), %eax
- adcl -68(%esi), %eax
- adcl -64(%esi), %eax
- adcl -60(%esi), %eax
- adcl -56(%esi), %eax
- adcl -52(%esi), %eax
- adcl -48(%esi), %eax
- adcl -44(%esi), %eax
- adcl -40(%esi), %eax
- adcl -36(%esi), %eax
- adcl -32(%esi), %eax
- adcl -28(%esi), %eax
- adcl -24(%esi), %eax
- adcl -20(%esi), %eax
- adcl -16(%esi), %eax
- adcl -12(%esi), %eax
- adcl -8(%esi), %eax
- adcl -4(%esi), %eax
-45:
- lea 128(%esi), %esi
- adcl $0, %eax
- dec %ecx
- jge 40b
- movl %edx, %ecx
-50: andl $3, %ecx
- jz 80f
-
- # Handle the last 1-3 bytes without jumping
- notl %ecx # 1->2, 2->1, 3->0, higher bits are masked
- movl $0xffffff,%ebx # by the shll and shrl instructions
- shll $3,%ecx
- shrl %cl,%ebx
- andl -128(%esi),%ebx # esi is 4-aligned so should be ok
- addl %ebx,%eax
- adcl $0,%eax
-80:
- testl $1, 12(%esp)
- jz 90f
- roll $8, %eax
-90:
- popl %ebx
- popl %esi
- ret
-
-#endif
-
-/*
-unsigned int csum_partial_copy_generic (const char *src, char *dst,
- int len, int sum, int *src_err_ptr, int *dst_err_ptr)
- */
-
-/*
- * Copy from ds while checksumming, otherwise like csum_partial
- *
- * The macros SRC and DST specify the type of access for the instruction.
- * thus we can call a custom exception handler for all access types.
- *
- * FIXME: could someone double-check whether I haven't mixed up some SRC and
- * DST definitions? It's damn hard to trigger all cases. I hope I got
- * them all but there's no guarantee.
- */
-
-#define SRC(y...) \
- 9999: y; \
- .section __ex_table, "a"; \
- .long 9999b, 6001f ; \
- .previous
-
-#define DST(y...) \
- 9999: y; \
- .section __ex_table, "a"; \
- .long 9999b, 6002f ; \
- .previous
-
-.align 4
-.globl csum_partial_copy_generic
-
-#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
-
-#define ARGBASE 16
-#define FP 12
-
-csum_partial_copy_generic:
- subl $4,%esp
- pushl %edi
- pushl %esi
- pushl %ebx
- movl ARGBASE+16(%esp),%eax # sum
- movl ARGBASE+12(%esp),%ecx # len
- movl ARGBASE+4(%esp),%esi # src
- movl ARGBASE+8(%esp),%edi # dst
-
- testl $2, %edi # Check alignment.
- jz 2f # Jump if alignment is ok.
- subl $2, %ecx # Alignment uses up two bytes.
- jae 1f # Jump if we had at least two bytes.
- addl $2, %ecx # ecx was < 2. Deal with it.
- jmp 4f
-SRC(1: movw (%esi), %bx )
- addl $2, %esi
-DST( movw %bx, (%edi) )
- addl $2, %edi
- addw %bx, %ax
- adcl $0, %eax
-2:
- movl %ecx, FP(%esp)
- shrl $5, %ecx
- jz 2f
- testl %esi, %esi
-SRC(1: movl (%esi), %ebx )
-SRC( movl 4(%esi), %edx )
- adcl %ebx, %eax
-DST( movl %ebx, (%edi) )
- adcl %edx, %eax
-DST( movl %edx, 4(%edi) )
-
-SRC( movl 8(%esi), %ebx )
-SRC( movl 12(%esi), %edx )
- adcl %ebx, %eax
-DST( movl %ebx, 8(%edi) )
- adcl %edx, %eax
-DST( movl %edx, 12(%edi) )
-
-SRC( movl 16(%esi), %ebx )
-SRC( movl 20(%esi), %edx )
- adcl %ebx, %eax
-DST( movl %ebx, 16(%edi) )
- adcl %edx, %eax
-DST( movl %edx, 20(%edi) )
-
-SRC( movl 24(%esi), %ebx )
-SRC( movl 28(%esi), %edx )
- adcl %ebx, %eax
-DST( movl %ebx, 24(%edi) )
- adcl %edx, %eax
-DST( movl %edx, 28(%edi) )
-
- lea 32(%esi), %esi
- lea 32(%edi), %edi
- dec %ecx
- jne 1b
- adcl $0, %eax
-2: movl FP(%esp), %edx
- movl %edx, %ecx
- andl $0x1c, %edx
- je 4f
- shrl $2, %edx # This clears CF
-SRC(3: movl (%esi), %ebx )
- adcl %ebx, %eax
-DST( movl %ebx, (%edi) )
- lea 4(%esi), %esi
- lea 4(%edi), %edi
- dec %edx
- jne 3b
- adcl $0, %eax
-4: andl $3, %ecx
- jz 7f
- cmpl $2, %ecx
- jb 5f
-SRC( movw (%esi), %cx )
- leal 2(%esi), %esi
-DST( movw %cx, (%edi) )
- leal 2(%edi), %edi
- je 6f
- shll $16,%ecx
-SRC(5: movb (%esi), %cl )
-DST( movb %cl, (%edi) )
-6: addl %ecx, %eax
- adcl $0, %eax
-7:
-5000:
-
-# Exception handler:
-.section .fixup, "ax"
-
-6001:
- movl ARGBASE+20(%esp), %ebx # src_err_ptr
- movl $-EFAULT, (%ebx)
-
- # zero the complete destination - computing the rest
- # is too much work
- movl ARGBASE+8(%esp), %edi # dst
- movl ARGBASE+12(%esp), %ecx # len
- xorl %eax,%eax
- rep ; stosb
-
- jmp 5000b
-
-6002:
- movl ARGBASE+24(%esp), %ebx # dst_err_ptr
- movl $-EFAULT,(%ebx)
- jmp 5000b
-
-.previous
-
- popl %ebx
- popl %esi
- popl %edi
- popl %ecx # equivalent to addl $4,%esp
- ret
-
-#else
-
-/* Version for PentiumII/PPro */
-
-#define ROUND1(x) \
- SRC(movl x(%esi), %ebx ) ; \
- addl %ebx, %eax ; \
- DST(movl %ebx, x(%edi) ) ;
-
-#define ROUND(x) \
- SRC(movl x(%esi), %ebx ) ; \
- adcl %ebx, %eax ; \
- DST(movl %ebx, x(%edi) ) ;
-
-#define ARGBASE 12
-
-csum_partial_copy_generic:
- pushl %ebx
- pushl %edi
- pushl %esi
- movl ARGBASE+4(%esp),%esi #src
- movl ARGBASE+8(%esp),%edi #dst
- movl ARGBASE+12(%esp),%ecx #len
- movl ARGBASE+16(%esp),%eax #sum
-# movl %ecx, %edx
- movl %ecx, %ebx
- movl %esi, %edx
- shrl $6, %ecx
- andl $0x3c, %ebx
- negl %ebx
- subl %ebx, %esi
- subl %ebx, %edi
- lea -1(%esi),%edx
- andl $-32,%edx
- lea 3f(%ebx,%ebx), %ebx
- testl %esi, %esi
- jmp *%ebx
-1: addl $64,%esi
- addl $64,%edi
- SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
- ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)
- ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36)
- ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20)
- ROUND (-16) ROUND(-12) ROUND(-8) ROUND(-4)
-3: adcl $0,%eax
- addl $64, %edx
- dec %ecx
- jge 1b
-4: movl ARGBASE+12(%esp),%edx #len
- andl $3, %edx
- jz 7f
- cmpl $2, %edx
- jb 5f
-SRC( movw (%esi), %dx )
- leal 2(%esi), %esi
-DST( movw %dx, (%edi) )
- leal 2(%edi), %edi
- je 6f
- shll $16,%edx
-5:
-SRC( movb (%esi), %dl )
-DST( movb %dl, (%edi) )
-6: addl %edx, %eax
- adcl $0, %eax
-7:
-.section .fixup, "ax"
-6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
- movl $-EFAULT, (%ebx)
- # zero the complete destination (computing the rest is too much work)
- movl ARGBASE+8(%esp),%edi # dst
- movl ARGBASE+12(%esp),%ecx # len
- xorl %eax,%eax
- rep; stosb
- jmp 7b
-6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
- movl $-EFAULT, (%ebx)
- jmp 7b
-.previous
-
- popl %esi
- popl %edi
- popl %ebx
- ret
-
-#undef ROUND
-#undef ROUND1
-
-#endif
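
checksum.S supplies csum_partial() plus the copy-and-checksum primitive, whose __ex_table fixups zero the whole destination if the user copy faults. Callers never see the alignment gymnastics; the C-side contract is roughly this (a sketch, assuming the usual <net/checksum.h> declarations):

    /* Accumulate a 32-bit partial sum over a buffer, then fold it down
     * to the final 16-bit ones-complement internet checksum. */
    static unsigned short checksum_of(const unsigned char *buf, int len)
    {
        unsigned int sum = csum_partial(buf, len, 0);
        return csum_fold(sum);
    }
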
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/dec_and_lock.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/dec_and_lock.c
deleted file mode 100644
index ffd4869001..0000000000
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/dec_and_lock.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * x86 version of "atomic_dec_and_lock()" using
- * the atomic "cmpxchg" instruction.
- *
- * (For CPU's lacking cmpxchg, we use the slow
- * generic version, and this one never even gets
- * compiled).
- */
-
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-
-int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
- int counter;
- int newcount;
-
-repeat:
- counter = atomic_read(atomic);
- newcount = counter-1;
-
- if (!newcount)
- goto slow_path;
-
- asm volatile("lock; cmpxchgl %1,%2"
- :"=a" (newcount)
- :"r" (newcount), "m" (atomic->counter), "0" (counter));
-
- /* If the above failed, "eax" will have changed */
- if (newcount != counter)
- goto repeat;
- return 0;
-
-slow_path:
- spin_lock(lock);
- if (atomic_dec_and_test(atomic))
- return 1;
- spin_unlock(lock);
- return 0;
-}
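
atomic_dec_and_lock() packages a common refcounting idiom: drop the reference without touching the lock while the count stays positive, but acquire the lock atomically with the transition to zero so the final put can tear the object down race-free. A typical caller looks like this (sketch; struct object and its fields are hypothetical):

    void put_object(struct object *obj)
    {
        if (atomic_dec_and_lock(&obj->refcnt, &obj->lock)) {
            /* Count hit zero and obj->lock is held: safe to unlink and free. */
            list_del(&obj->list);
            spin_unlock(&obj->lock);
            kfree(obj);
        }
        /* Returned 0: other references remain; the lock was never taken. */
    }
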
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/getuser.S b/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/getuser.S
deleted file mode 100644
index c244721e70..0000000000
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/getuser.S
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * __get_user functions.
- *
- * (C) Copyright 1998 Linus Torvalds
- *
- * These functions have a non-standard call interface
- * to make them more efficient, especially as they
- * return an error value in addition to the "real"
- * return value.
- */
-
-/*
- * __get_user_X
- *
- * Inputs: %eax contains the address
- *
- * Outputs: %eax is error code (0 or -EFAULT)
- * %edx contains zero-extended value
- *
- * These functions should not modify any other registers,
- * as they get called from within inline assembly.
- */
-
-addr_limit = 12
-
-.text
-.align 4
-.globl __get_user_1
-__get_user_1:
- movl %esp,%edx
- andl $0xffffe000,%edx
- cmpl addr_limit(%edx),%eax
- jae bad_get_user
-1: movzbl (%eax),%edx
- xorl %eax,%eax
- ret
-
-.align 4
-.globl __get_user_2
-__get_user_2:
- addl $1,%eax
- movl %esp,%edx
- jc bad_get_user
- andl $0xffffe000,%edx
- cmpl addr_limit(%edx),%eax
- jae bad_get_user
-2: movzwl -1(%eax),%edx
- xorl %eax,%eax
- ret
-
-.align 4
-.globl __get_user_4
-__get_user_4:
- addl $3,%eax
- movl %esp,%edx
- jc bad_get_user
- andl $0xffffe000,%edx
- cmpl addr_limit(%edx),%eax
- jae bad_get_user
-3: movl -3(%eax),%edx
- xorl %eax,%eax
- ret
-
-bad_get_user:
- xorl %edx,%edx
- movl $-14,%eax
- ret
-
-.section __ex_table,"a"
- .long 1b,bad_get_user
- .long 2b,bad_get_user
- .long 3b,bad_get_user
-.previous
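
getuser.S is the out-of-line half of get_user(): the macro in <asm/uaccess.h> loads the user address into %eax and calls __get_user_N for the access size, receiving the error code back in %eax and the zero-extended value in %edx, which is why the stubs may clobber nothing else (note how they even locate the task's addr_limit by masking %esp rather than spilling a register). Callers just write (sketch):

    static int read_user_int(int *uptr, int *out)
    {
        int value;
        if (get_user(value, uptr))   /* expands to a __get_user_4 call */
            return -EFAULT;          /* address past addr_limit, or faulted */
        *out = value;
        return 0;
    }
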
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/iodebug.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/iodebug.c
deleted file mode 100644
index 701a07fe72..0000000000
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/iodebug.c
+++ /dev/null
@@ -1,19 +0,0 @@
-#include <asm/io.h>
-
-void * __io_virt_debug(unsigned long x, const char *file, int line)
-{
- if (x < PAGE_OFFSET) {
- printk("io mapaddr 0x%05lx not valid at %s:%d!\n", x, file, line);
- return __va(x);
- }
- return (void *)x;
-}
-
-unsigned long __io_phys_debug(unsigned long x, const char *file, int line)
-{
- if (x < PAGE_OFFSET) {
- printk("io mapaddr 0x%05lx not valid at %s:%d!\n", x, file, line);
- return x;
- }
- return __pa(x);
-}
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/memcpy.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/memcpy.c
deleted file mode 100644
index 4cb37b6e50..0000000000
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/memcpy.c
+++ /dev/null
@@ -1,19 +0,0 @@
-#include <linux/config.h>
-#include <linux/string.h>
-
-#undef memcpy
-#undef memset
-
-void * memcpy(void * to, const void * from, size_t n)
-{
-#ifdef CONFIG_X86_USE_3DNOW
- return __memcpy3d(to, from, n);
-#else
- return __memcpy(to, from, n);
-#endif
-}
-
-void * memset(void * s, int c, size_t count)
-{
- return __memset(s, c, count);
-}
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/mmx.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/mmx.c
deleted file mode 100644
index 2e52b5d5af..0000000000
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/mmx.c
+++ /dev/null
@@ -1,399 +0,0 @@
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/sched.h>
-
-#include <asm/i387.h>
-#include <asm/hardirq.h>
-
-
-/*
- * MMX 3DNow! library helper functions
- *
- * To do:
- * We can use MMX just for prefetch in IRQ's. This may be a win.
- * (reported so on K6-III)
- * We should use a better code neutral filler for the short jump
- * leal ebx. [ebx] is apparently best for K6-2, but Cyrix ??
- * We also want to clobber the filler register so we dont get any
- * register forwarding stalls on the filler.
- *
- * Add *user handling. Checksums are not a win with MMX on any CPU
- * tested so far for any MMX solution figured.
- *
- * 22/09/2000 - Arjan van de Ven
- * Improved for non-egineering-sample Athlons
- *
- */
-
-void *_mmx_memcpy(void *to, const void *from, size_t len)
-{
- void *p;
- int i;
-
- if (in_interrupt())
- return __memcpy(to, from, len);
-
- p = to;
- i = len >> 6; /* len/64 */
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
- "1: prefetch (%0)\n" /* This set is 28 bytes */
- " prefetch 64(%0)\n"
- " prefetch 128(%0)\n"
- " prefetch 192(%0)\n"
- " prefetch 256(%0)\n"
- "2: \n"
- ".section .fixup, \"ax\"\n"
- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
- " jmp 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b, 3b\n"
- ".previous"
- : : "r" (from) );
-
-
- for(; i>5; i--)
- {
- __asm__ __volatile__ (
- "1: prefetch 320(%0)\n"
- "2: movq (%0), %%mm0\n"
- " movq 8(%0), %%mm1\n"
- " movq 16(%0), %%mm2\n"
- " movq 24(%0), %%mm3\n"
- " movq %%mm0, (%1)\n"
- " movq %%mm1, 8(%1)\n"
- " movq %%mm2, 16(%1)\n"
- " movq %%mm3, 24(%1)\n"
- " movq 32(%0), %%mm0\n"
- " movq 40(%0), %%mm1\n"
- " movq 48(%0), %%mm2\n"
- " movq 56(%0), %%mm3\n"
- " movq %%mm0, 32(%1)\n"
- " movq %%mm1, 40(%1)\n"
- " movq %%mm2, 48(%1)\n"
- " movq %%mm3, 56(%1)\n"
- ".section .fixup, \"ax\"\n"
- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
- " jmp 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b, 3b\n"
- ".previous"
- : : "r" (from), "r" (to) : "memory");
- from+=64;
- to+=64;
- }
-
- for(; i>0; i--)
- {
- __asm__ __volatile__ (
- " movq (%0), %%mm0\n"
- " movq 8(%0), %%mm1\n"
- " movq 16(%0), %%mm2\n"
- " movq 24(%0), %%mm3\n"
- " movq %%mm0, (%1)\n"
- " movq %%mm1, 8(%1)\n"
- " movq %%mm2, 16(%1)\n"
- " movq %%mm3, 24(%1)\n"
- " movq 32(%0), %%mm0\n"
- " movq 40(%0), %%mm1\n"
- " movq 48(%0), %%mm2\n"
- " movq 56(%0), %%mm3\n"
- " movq %%mm0, 32(%1)\n"
- " movq %%mm1, 40(%1)\n"
- " movq %%mm2, 48(%1)\n"
- " movq %%mm3, 56(%1)\n"
- : : "r" (from), "r" (to) : "memory");
- from+=64;
- to+=64;
- }
- /*
- * Now do the tail of the block
- */
- __memcpy(to, from, len&63);
- kernel_fpu_end();
- return p;
-}
-
-#ifdef CONFIG_MK7
-
-/*
- * The K7 has streaming cache bypass load/store. The Cyrix III, K6 and
- * other MMX using processors do not.
- */
-
-static void fast_clear_page(void *page)
-{
- int i;
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
- " pxor %%mm0, %%mm0\n" : :
- );
-
- for(i=0;i<4096/64;i++)
- {
- __asm__ __volatile__ (
- " movntq %%mm0, (%0)\n"
- " movntq %%mm0, 8(%0)\n"
- " movntq %%mm0, 16(%0)\n"
- " movntq %%mm0, 24(%0)\n"
- " movntq %%mm0, 32(%0)\n"
- " movntq %%mm0, 40(%0)\n"
- " movntq %%mm0, 48(%0)\n"
- " movntq %%mm0, 56(%0)\n"
- : : "r" (page) : "memory");
- page+=64;
- }
- /* since movntq is weakly-ordered, a "sfence" is needed to become
- * ordered again.
- */
- __asm__ __volatile__ (
- " sfence \n" : :
- );
- kernel_fpu_end();
-}
-
-static void fast_copy_page(void *to, void *from)
-{
- int i;
-
- kernel_fpu_begin();
-
- /* maybe the prefetch stuff can go before the expensive fnsave...
- * but that is for later. -AV
- */
- __asm__ __volatile__ (
- "1: prefetch (%0)\n"
- " prefetch 64(%0)\n"
- " prefetch 128(%0)\n"
- " prefetch 192(%0)\n"
- " prefetch 256(%0)\n"
- "2: \n"
- ".section .fixup, \"ax\"\n"
- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
- " jmp 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b, 3b\n"
- ".previous"
- : : "r" (from) );
-
- for(i=0; i<(4096-320)/64; i++)
- {
- __asm__ __volatile__ (
- "1: prefetch 320(%0)\n"
- "2: movq (%0), %%mm0\n"
- " movntq %%mm0, (%1)\n"
- " movq 8(%0), %%mm1\n"
- " movntq %%mm1, 8(%1)\n"
- " movq 16(%0), %%mm2\n"
- " movntq %%mm2, 16(%1)\n"
- " movq 24(%0), %%mm3\n"
- " movntq %%mm3, 24(%1)\n"
- " movq 32(%0), %%mm4\n"
- " movntq %%mm4, 32(%1)\n"
- " movq 40(%0), %%mm5\n"
- " movntq %%mm5, 40(%1)\n"
- " movq 48(%0), %%mm6\n"
- " movntq %%mm6, 48(%1)\n"
- " movq 56(%0), %%mm7\n"
- " movntq %%mm7, 56(%1)\n"
- ".section .fixup, \"ax\"\n"
- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
- " jmp 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b, 3b\n"
- ".previous"
- : : "r" (from), "r" (to) : "memory");
- from+=64;
- to+=64;
- }
- for(i=(4096-320)/64; i<4096/64; i++)
- {
- __asm__ __volatile__ (
- "2: movq (%0), %%mm0\n"
- " movntq %%mm0, (%1)\n"
- " movq 8(%0), %%mm1\n"
- " movntq %%mm1, 8(%1)\n"
- " movq 16(%0), %%mm2\n"
- " movntq %%mm2, 16(%1)\n"
- " movq 24(%0), %%mm3\n"
- " movntq %%mm3, 24(%1)\n"
- " movq 32(%0), %%mm4\n"
- " movntq %%mm4, 32(%1)\n"
- " movq 40(%0), %%mm5\n"
- " movntq %%mm5, 40(%1)\n"
- " movq 48(%0), %%mm6\n"
- " movntq %%mm6, 48(%1)\n"
- " movq 56(%0), %%mm7\n"
- " movntq %%mm7, 56(%1)\n"
- : : "r" (from), "r" (to) : "memory");
- from+=64;
- to+=64;
- }
- /* since movntq is weakly-ordered, a "sfence" is needed to become
- * ordered again.
- */
- __asm__ __volatile__ (
- " sfence \n" : :
- );
- kernel_fpu_end();
-}
-
-#else
-
-/*
- * Generic MMX implementation without K7 specific streaming
- */
-
-static void fast_clear_page(void *page)
-{
- int i;
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
- " pxor %%mm0, %%mm0\n" : :
- );
-
- for(i=0;i<4096/128;i++)
- {
- __asm__ __volatile__ (
- " movq %%mm0, (%0)\n"
- " movq %%mm0, 8(%0)\n"
- " movq %%mm0, 16(%0)\n"
- " movq %%mm0, 24(%0)\n"
- " movq %%mm0, 32(%0)\n"
- " movq %%mm0, 40(%0)\n"
- " movq %%mm0, 48(%0)\n"
- " movq %%mm0, 56(%0)\n"
- " movq %%mm0, 64(%0)\n"
- " movq %%mm0, 72(%0)\n"
- " movq %%mm0, 80(%0)\n"
- " movq %%mm0, 88(%0)\n"
- " movq %%mm0, 96(%0)\n"
- " movq %%mm0, 104(%0)\n"
- " movq %%mm0, 112(%0)\n"
- " movq %%mm0, 120(%0)\n"
- : : "r" (page) : "memory");
- page+=128;
- }
-
- kernel_fpu_end();
-}
-
-static void fast_copy_page(void *to, void *from)
-{
- int i;
-
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
- "1: prefetch (%0)\n"
- " prefetch 64(%0)\n"
- " prefetch 128(%0)\n"
- " prefetch 192(%0)\n"
- " prefetch 256(%0)\n"
- "2: \n"
- ".section .fixup, \"ax\"\n"
- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
- " jmp 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b, 3b\n"
- ".previous"
- : : "r" (from) );
-
- for(i=0; i<4096/64; i++)
- {
- __asm__ __volatile__ (
- "1: prefetch 320(%0)\n"
- "2: movq (%0), %%mm0\n"
- " movq 8(%0), %%mm1\n"
- " movq 16(%0), %%mm2\n"
- " movq 24(%0), %%mm3\n"
- " movq %%mm0, (%1)\n"
- " movq %%mm1, 8(%1)\n"
- " movq %%mm2, 16(%1)\n"
- " movq %%mm3, 24(%1)\n"
- " movq 32(%0), %%mm0\n"
- " movq 40(%0), %%mm1\n"
- " movq 48(%0), %%mm2\n"
- " movq 56(%0), %%mm3\n"
- " movq %%mm0, 32(%1)\n"
- " movq %%mm1, 40(%1)\n"
- " movq %%mm2, 48(%1)\n"
- " movq %%mm3, 56(%1)\n"
- ".section .fixup, \"ax\"\n"
- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
- " jmp 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b, 3b\n"
- ".previous"
- : : "r" (from), "r" (to) : "memory");
- from+=64;
- to+=64;
- }
- kernel_fpu_end();
-}
-
-
-#endif
-
-/*
- * Favour MMX for page clear and copy.
- */
-
-static void slow_zero_page(void * page)
-{
- int d0, d1;
- __asm__ __volatile__( \
- "cld\n\t" \
- "rep ; stosl" \
- : "=&c" (d0), "=&D" (d1)
- :"a" (0),"1" (page),"0" (1024)
- :"memory");
-}
-
-void mmx_clear_page(void * page)
-{
- if(in_interrupt())
- slow_zero_page(page);
- else
- fast_clear_page(page);
-}
-
-static void slow_copy_page(void *to, void *from)
-{
- int d0, d1, d2;
-	__asm__ __volatile__(
-		"cld\n\t"
-		"rep ; movsl"
-		: "=&c" (d0), "=&D" (d1), "=&S" (d2)
-		: "0" (1024), "1" ((long) to), "2" ((long) from)
-		: "memory");
-}
-
-
-void mmx_copy_page(void *to, void *from)
-{
- if(in_interrupt())
- slow_copy_page(to, from);
- else
- fast_copy_page(to, from);
-}
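
The file removed above leans on two ideas. First, the K7 path's movntq is a weakly-ordered non-temporal store that bypasses the cache, which is why every streaming loop ends with sfence. Second, the MMX paths require kernel_fpu_begin()/kernel_fpu_end(), which is unsafe in interrupt context, so mmx_clear_page() and mmx_copy_page() fall back to plain rep stosl/rep movsl when in_interrupt() is true. A minimal sketch of the non-temporal idiom, assuming GCC inline asm on an MMX/SSE-capable x86 (illustrative only; in kernel context it would still have to be bracketed by kernel_fpu_begin()/end()):

    /* Copy 16 bytes with non-temporal (cache-bypassing) stores.
     * movntq results may become globally visible out of order; the
     * trailing sfence orders them before any subsequent store. */
    static void copy16_nt(const void *from, void *to)
    {
        __asm__ __volatile__(
            " movq    (%0), %%mm0\n"
            " movntq  %%mm0,  (%1)\n"
            " movq   8(%0), %%mm1\n"
            " movntq  %%mm1, 8(%1)\n"
            " sfence\n"
            " emms\n"        /* leave the FP/MMX state usable again */
            : : "r" (from), "r" (to) : "memory");
    }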
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/old-checksum.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/old-checksum.c
deleted file mode 100644
index ae3a38043a..0000000000
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/old-checksum.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * FIXME: old compatibility stuff, will be removed soon.
- */
-
-#include <net/checksum.h>
-
-unsigned int csum_partial_copy(const char *src, char *dst, int len, int sum)
-{
- int src_err=0, dst_err=0;
-
-	sum = csum_partial_copy_generic(src, dst, len, sum, &src_err, &dst_err);
-
- if (src_err || dst_err)
- printk("old csum_partial_copy_fromuser(), tell mingo to convert me.\n");
-
- return sum;
-}
-
-
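
The shim above only forwards to the asm routine csum_partial_copy_generic() and warns if either fault flag fires. The quantity it computes is the RFC 1071 Internet checksum; a plain C reference version (a sketch assuming 2-byte-aligned input, not the kernel's implementation):

    #include <stddef.h>
    #include <stdint.h>

    /* RFC 1071: sum 16-bit words into a 32-bit accumulator, fold the
     * carries back in, and return the one's complement of the result. */
    static uint16_t inet_checksum(const void *data, size_t len)
    {
        const uint16_t *p = data;
        uint32_t sum = 0;

        while (len > 1) {
            sum += *p++;
            len -= 2;
        }
        if (len)                            /* trailing odd byte */
            sum += *(const uint8_t *)p;
        while (sum >> 16)                   /* fold carries */
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }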
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/strstr.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/strstr.c
deleted file mode 100644
index a3dafbf59d..0000000000
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/strstr.c
+++ /dev/null
@@ -1,31 +0,0 @@
-#include <linux/string.h>
-
-char *strstr(const char *cs, const char *ct)
-{
-    int d0, d1;
-    register char *__res;
-    __asm__ __volatile__(
-        "movl %6,%%edi\n\t"
-        "repne\n\t"
-        "scasb\n\t"
-        "notl %%ecx\n\t"
-        "decl %%ecx\n\t" /* NOTE! This also sets Z if searchstring='' */
-        "movl %%ecx,%%edx\n"
-        "1:\tmovl %6,%%edi\n\t"
-        "movl %%esi,%%eax\n\t"
-        "movl %%edx,%%ecx\n\t"
-        "repe\n\t"
-        "cmpsb\n\t"
-        "je 2f\n\t" /* also works for empty string, see above */
-        "xchgl %%eax,%%esi\n\t"
-        "incl %%esi\n\t"
-        "cmpb $0,-1(%%eax)\n\t"
-        "jne 1b\n\t"
-        "xorl %%eax,%%eax\n\t"
-        "2:"
-        : "=a" (__res), "=&c" (d0), "=&S" (d1)
-        : "0" (0), "1" (0xffffffff), "2" (cs), "g" (ct)
-        : "dx", "di");
-    return __res;
-}
-
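
The asm above first measures the needle: repne scasb scans for the NUL, and the notl/decl pair turns the count register into strlen(ct) while setting ZF when the needle is empty. It then slides a repe cmpsb comparison one byte at a time along the haystack. A portable C rendering of the same strategy (a sketch with the same semantics, including the empty-needle case):

    #include <string.h>

    static char *strstr_ref(const char *cs, const char *ct)
    {
        size_t n = strlen(ct);          /* the repne scasb step */

        if (n == 0)
            return (char *)cs;          /* empty needle matches immediately */
        for (; *cs != '\0'; cs++)       /* the repe cmpsb sweep */
            if (strncmp(cs, ct, n) == 0)
                return (char *)cs;
        return NULL;
    }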
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/usercopy.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/usercopy.c
deleted file mode 100644
index d81fa81af8..0000000000
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/lib/usercopy.c
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * User address space access functions.
- * The non-inlined parts of asm-i386/uaccess.h are here.
- *
- * Copyright 1997 Andi Kleen <ak@muc.de>
- * Copyright 1997 Linus Torvalds
- */
-#include <linux/config.h>
-#include <asm/uaccess.h>
-#include <asm/mmx.h>
-
-#ifdef CONFIG_X86_USE_3DNOW_AND_WORKS
-
-unsigned long
-__generic_copy_to_user(void *to, const void *from, unsigned long n)
-{
- if (access_ok(VERIFY_WRITE, to, n))
- {
- if(n<512)
- __copy_user(to,from,n);
- else
- mmx_copy_user(to,from,n);
- }
- return n;
-}
-
-unsigned long
-__generic_copy_from_user(void *to, const void *from, unsigned long n)
-{
- if (access_ok(VERIFY_READ, from, n))
- {
- if(n<512)
- __copy_user_zeroing(to,from,n);
- else
- mmx_copy_user_zeroing(to, from, n);
- }
- else
- memset(to, 0, n);
- return n;
-}
-
-#else
-
-unsigned long
-__generic_copy_to_user(void *to, const void *from, unsigned long n)
-{
- prefetch(from);
- if (access_ok(VERIFY_WRITE, to, n))
- __copy_user(to,from,n);
- return n;
-}
-
-unsigned long
-__generic_copy_from_user(void *to, const void *from, unsigned long n)
-{
- prefetchw(to);
- if (access_ok(VERIFY_READ, from, n))
- __copy_user_zeroing(to,from,n);
- else
- memset(to, 0, n);
- return n;
-}
-
-#endif
-
-/*
- * Copy a null-terminated string from userspace.
- */
-
-#define __do_strncpy_from_user(dst,src,count,res) \
-do { \
- int __d0, __d1, __d2; \
- __asm__ __volatile__( \
- " testl %1,%1\n" \
- " jz 2f\n" \
- "0: lodsb\n" \
- " stosb\n" \
- " testb %%al,%%al\n" \
- " jz 1f\n" \
- " decl %1\n" \
- " jnz 0b\n" \
- "1: subl %1,%0\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: movl %5,%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 0b,3b\n" \
- ".previous" \
- : "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \
- "=&D" (__d2) \
- : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
- : "memory"); \
-} while (0)
-
-long
-__strncpy_from_user(char *dst, const char *src, long count)
-{
- long res;
- __do_strncpy_from_user(dst, src, count, res);
- return res;
-}
-
-long
-strncpy_from_user(char *dst, const char *src, long count)
-{
- long res = -EFAULT;
- if (access_ok(VERIFY_READ, src, 1))
- __do_strncpy_from_user(dst, src, count, res);
- return res;
-}
-
-
-/*
- * Zero Userspace
- */
-
-#define __do_clear_user(addr,size) \
-do { \
- int __d0; \
- __asm__ __volatile__( \
- "0: rep; stosl\n" \
- " movl %2,%0\n" \
- "1: rep; stosb\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: lea 0(%2,%0,4),%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 0b,3b\n" \
- " .long 1b,2b\n" \
- ".previous" \
- : "=&c"(size), "=&D" (__d0) \
- : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
-} while (0)
-
-unsigned long
-clear_user(void *to, unsigned long n)
-{
- if (access_ok(VERIFY_WRITE, to, n))
- __do_clear_user(to, n);
- return n;
-}
-
-unsigned long
-__clear_user(void *to, unsigned long n)
-{
- __do_clear_user(to, n);
- return n;
-}
-
-/*
- * Return the size of a string (including the terminating NUL).
- *
- * Returns 0 on exception, or a value greater than n if the string
- * is too long.
- */
-
-long strnlen_user(const char *s, long n)
-{
- unsigned long mask = -__addr_ok(s);
- unsigned long res, tmp;
-
- __asm__ __volatile__(
- " testl %0, %0\n"
- " jz 3f\n"
- " andl %0,%%ecx\n"
- "0: repne; scasb\n"
- " setne %%al\n"
- " subl %%ecx,%0\n"
- " addl %0,%%eax\n"
- "1:\n"
- ".section .fixup,\"ax\"\n"
- "2: xorl %%eax,%%eax\n"
- " jmp 1b\n"
- "3: movb $1,%%al\n"
- " jmp 1b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 0b,2b\n"
- ".previous"
- :"=r" (n), "=D" (s), "=a" (res), "=c" (tmp)
- :"0" (n), "1" (s), "2" (0), "3" (mask)
- :"cc");
- return res & mask;
-}
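
All of the helpers in this file share one shape: validate the user range with access_ok(), do the work in asm, and recover from mid-copy faults through the __ex_table fixup entries rather than any C-level check. The trickiest return convention is strncpy_from_user's; a plain C model of it (illustrative, with a hypothetical name; the real -EFAULT path is taken via the fixup at label 3 above):

    /* Returns the length of the copied string excluding the NUL, or
     * count if no NUL appeared within count bytes.  The kernel version
     * additionally returns -EFAULT when reading src faults. */
    static long strncpy_from_user_model(char *dst, const char *src, long count)
    {
        long copied = 0;

        while (copied < count) {
            char c = src[copied];       /* the asm may fault here */
            dst[copied] = c;            /* the NUL is copied... */
            if (c == '\0')
                return copied;          /* ...but not counted */
            copied++;
        }
        return count;                   /* unterminated within count */
    }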
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/mm/extable.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/mm/extable.c
deleted file mode 100644
index 4cd9f064c3..0000000000
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/mm/extable.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * linux/arch/i386/mm/extable.c
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <asm/uaccess.h>
-
-extern const struct exception_table_entry __start___ex_table[];
-extern const struct exception_table_entry __stop___ex_table[];
-
-static inline unsigned long
-search_one_table(const struct exception_table_entry *first,
- const struct exception_table_entry *last,
- unsigned long value)
-{
- while (first <= last) {
- const struct exception_table_entry *mid;
- long diff;
-
- mid = (last - first) / 2 + first;
- diff = mid->insn - value;
- if (diff == 0)
- return mid->fixup;
- else if (diff < 0)
- first = mid+1;
- else
- last = mid-1;
- }
- return 0;
-}
-
-extern spinlock_t modlist_lock;
-
-unsigned long
-search_exception_table(unsigned long addr)
-{
- unsigned long ret = 0;
-
-#ifndef CONFIG_MODULES
- /* There is only the kernel to search. */
- ret = search_one_table(__start___ex_table, __stop___ex_table-1, addr);
- return ret;
-#else
- unsigned long flags;
- /* The kernel is the last "module" -- no need to treat it special. */
- struct module *mp;
-
- spin_lock_irqsave(&modlist_lock, flags);
- for (mp = module_list; mp != NULL; mp = mp->next) {
-		if (mp->ex_table_start == NULL ||
-		    !(mp->flags & (MOD_RUNNING|MOD_INITIALIZING)))
-			continue;
- ret = search_one_table(mp->ex_table_start,
- mp->ex_table_end - 1, addr);
- if (ret)
- break;
- }
- spin_unlock_irqrestore(&modlist_lock, flags);
- return ret;
-#endif
-}
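
search_one_table() is a textbook binary search over exception-table entries sorted by faulting instruction address. A self-contained sketch of the same lookup (field names mirror the 2.4 i386 layout, where insn is the faulting EIP and fixup the recovery address):

    struct ex_entry {
        unsigned long insn;     /* address that may fault */
        unsigned long fixup;    /* where to resume on a fault */
    };

    /* Entries must be sorted by insn; returns 0 if no fixup exists. */
    static unsigned long search_table(const struct ex_entry *first,
                                      const struct ex_entry *last,
                                      unsigned long value)
    {
        while (first <= last) {
            const struct ex_entry *mid = first + (last - first) / 2;

            if (mid->insn == value)
                return mid->fixup;
            if (mid->insn < value)
                first = mid + 1;
            else
                last = mid - 1;
        }
        return 0;
    }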
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/mm/pageattr.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/mm/pageattr.c
deleted file mode 100644
index 8d3b3e3856..0000000000
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/mm/pageattr.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright 2002 Andi Kleen, SuSE Labs.
- * Thanks to Ben LaHaise for precious feedback.
- */
-
-#include <linux/config.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/highmem.h>
-#include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/processor.h>
-
-/* Should move most of this stuff into the appropriate includes */
-#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
-#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
-
-static inline pte_t *lookup_address(unsigned long address)
-{
- pmd_t *pmd;
- pgd_t *pgd = pgd_offset(&init_mm, address);
-
- if (pgd_none(*pgd))
- return NULL;
- if (pgd_val(*pgd) & _PAGE_PSE)
- return (pte_t *)pgd;
- pmd = pmd_offset(pgd, address);
- if (pmd_none(*pmd))
- return NULL;
- if (pmd_val(*pmd) & _PAGE_PSE)
- return (pte_t *)pmd;
- return pte_offset(pmd, address);
-}
-
-static struct page *split_large_page(unsigned long address, pgprot_t prot)
-{
- int i;
- unsigned long addr;
- struct page *base = alloc_pages(GFP_KERNEL, 0);
- pte_t *pbase;
- if (!base)
- return NULL;
- address = __pa(address);
- addr = address & LARGE_PAGE_MASK;
- pbase = (pte_t *)page_address(base);
- for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
- pbase[i] = mk_pte_phys(addr,
- addr == address ? prot : PAGE_KERNEL);
- }
- return base;
-}
-
-static void flush_kernel_map(void * address)
-{
- if (!test_bit(X86_FEATURE_SELFSNOOP, boot_cpu_data.x86_capability)) {
- /* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
- if (boot_cpu_data.x86_model >= 4)
- asm volatile("wbinvd":::"memory");
- }
-
- /* Do global flush here to work around large page flushing errata
- in some early Athlons */
- __flush_tlb_all();
-}
-
-static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
-{
- set_pte_atomic(kpte, pte); /* change init_mm */
-#ifndef CONFIG_X86_PAE
- {
- struct list_head *l;
- spin_lock(&mmlist_lock);
- list_for_each(l, &init_mm.mmlist) {
- struct mm_struct *mm = list_entry(l, struct mm_struct, mmlist);
- pmd_t *pmd = pmd_offset(pgd_offset(mm, address), address);
- set_pte_atomic((pte_t *)pmd, pte);
- }
- spin_unlock(&mmlist_lock);
- }
-#endif
-}
-
-/* no more special protections in this 2/4MB area - revert to a
- large page again. */
-static inline void revert_page(struct page *kpte_page, unsigned long address)
-{
- pte_t *linear = (pte_t *)
- pmd_offset(pgd_offset(&init_mm, address), address);
- set_pmd_pte(linear, address,
- mk_pte_phys(__pa(address & LARGE_PAGE_MASK),
- MAKE_GLOBAL(_KERNPG_TABLE|_PAGE_PSE)));
-}
-
-/*
- * Change the page attributes of a page in the linear mapping.
- *
- * This should be used when a page is mapped with a caching policy other than
- * write-back somewhere - some CPUs do not like it when mappings with
- * different caching policies exist. This changes the page attributes in the
- * kernel linear mapping too.
- *
- * The caller needs to ensure that there are no conflicting mappings elsewhere.
- * This function only deals with the kernel linear map.
- * When the page is in highmem it must never be kmap'ed.
- */
-static int
-__change_page_attr(struct page *page, pgprot_t prot, struct page **oldpage)
-{
- pte_t *kpte;
- unsigned long address;
- struct page *kpte_page;
-
-#ifdef CONFIG_HIGHMEM
- if (page >= highmem_start_page)
- BUG();
-#endif
- address = (unsigned long)page_address(page);
- kpte = lookup_address(address);
- if (!kpte)
- return -EINVAL;
- kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
- if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
- if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
- pte_t old = *kpte;
- pte_t standard = mk_pte(page, PAGE_KERNEL);
-
- set_pte_atomic(kpte, mk_pte(page, prot));
- if (pte_same(old,standard))
- atomic_inc(&kpte_page->count);
- } else {
- struct page *split = split_large_page(address, prot);
- if (!split)
- return -ENOMEM;
- atomic_inc(&kpte_page->count);
- set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
- }
- } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
- set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
- atomic_dec(&kpte_page->count);
- }
-
- if (cpu_has_pse && (atomic_read(&kpte_page->count) == 1)) {
- *oldpage = kpte_page;
- revert_page(kpte_page, address);
- }
- return 0;
-}
-
-int change_page_attr(struct page *page, int numpages, pgprot_t prot)
-{
- int err = 0;
- struct page *fpage;
- int i;
-
- down_write(&init_mm.mmap_sem);
- for (i = 0; i < numpages; i++, page++) {
- fpage = NULL;
- err = __change_page_attr(page, prot, &fpage);
- if (err)
- break;
- if (fpage || i == numpages-1) {
- void *address = page_address(page);
-#ifdef CONFIG_SMP
- smp_call_function(flush_kernel_map, address, 1, 1);
-#endif
- flush_kernel_map(address);
- if (fpage)
- __free_page(fpage);
- }
- }
- up_write(&init_mm.mmap_sem);
- return err;
-}
-
-EXPORT_SYMBOL(change_page_attr);
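
The heart of the deleted file is the split/revert accounting in __change_page_attr(): the first non-default protection inside a 2/4MB region splits the large page into PTRS_PER_PTE small PTEs, each PTE moved away from PAGE_KERNEL bumps kpte_page->count, each one restored drops it, and once the count is back to its base value revert_page() reinstates the PSE mapping. A toy model of that invariant (a sketch with illustrative numbers, not kernel code):

    #include <stdio.h>

    #define PTES_PER_LARGE_PAGE 1024    /* i386 non-PAE: 4MB / 4KB */

    int main(void)
    {
        int nondefault = 0;   /* plays the role of kpte_page->count - 1 */
        int split = 0;        /* region currently split into small PTEs? */

        /* change_page_attr(page, 1, PAGE_KERNEL_NOCACHE): the first
         * custom PTE forces the split and is counted. */
        split = 1;
        nondefault++;

        /* change_page_attr(page, 1, PAGE_KERNEL): back to the default. */
        nondefault--;

        /* __change_page_attr reverts once no custom PTEs remain. */
        if (split && nondefault == 0) {
            split = 0;
            printf("all %d PTEs default again: merge into one large page\n",
                   PTES_PER_LARGE_PAGE);
        }
        return 0;
    }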