Diffstat (limited to 'old/xenolinux-2.4.16-sparse/arch/xeno/mm')
 old/xenolinux-2.4.16-sparse/arch/xeno/mm/Makefile            |   7
 old/xenolinux-2.4.16-sparse/arch/xeno/mm/extable.c           |  62
 old/xenolinux-2.4.16-sparse/arch/xeno/mm/fault.c             | 401
 old/xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c | 137
 old/xenolinux-2.4.16-sparse/arch/xeno/mm/hypervisor.c        | 168
 old/xenolinux-2.4.16-sparse/arch/xeno/mm/init.c              | 293
 old/xenolinux-2.4.16-sparse/arch/xeno/mm/mmu_context.c       |  26
 7 files changed, 1094 insertions(+), 0 deletions(-)
diff --git a/old/xenolinux-2.4.16-sparse/arch/xeno/mm/Makefile b/old/xenolinux-2.4.16-sparse/arch/xeno/mm/Makefile
new file mode 100644
index 0000000000..2818511a1a
--- /dev/null
+++ b/old/xenolinux-2.4.16-sparse/arch/xeno/mm/Makefile
@@ -0,0 +1,7 @@
+
+
+O_TARGET := mm.o
+
+obj-y := init.o fault.o extable.o hypervisor.o get_unmapped_area.o mmu_context.o
+
+include $(TOPDIR)/Rules.make
diff --git a/old/xenolinux-2.4.16-sparse/arch/xeno/mm/extable.c b/old/xenolinux-2.4.16-sparse/arch/xeno/mm/extable.c
new file mode 100644
index 0000000000..4cd9f064c3
--- /dev/null
+++ b/old/xenolinux-2.4.16-sparse/arch/xeno/mm/extable.c
@@ -0,0 +1,62 @@
+/*
+ * linux/arch/i386/mm/extable.c
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/uaccess.h>
+
+extern const struct exception_table_entry __start___ex_table[];
+extern const struct exception_table_entry __stop___ex_table[];
+
+static inline unsigned long
+search_one_table(const struct exception_table_entry *first,
+ const struct exception_table_entry *last,
+ unsigned long value)
+{
+ while (first <= last) {
+ const struct exception_table_entry *mid;
+ long diff;
+
+ mid = (last - first) / 2 + first;
+ diff = mid->insn - value;
+ if (diff == 0)
+ return mid->fixup;
+ else if (diff < 0)
+ first = mid+1;
+ else
+ last = mid-1;
+ }
+ return 0;
+}
+
+extern spinlock_t modlist_lock;
+
+unsigned long
+search_exception_table(unsigned long addr)
+{
+ unsigned long ret = 0;
+
+#ifndef CONFIG_MODULES
+ /* There is only the kernel to search. */
+ ret = search_one_table(__start___ex_table, __stop___ex_table-1, addr);
+ return ret;
+#else
+ unsigned long flags;
+ /* The kernel is the last "module" -- no need to treat it special. */
+ struct module *mp;
+
+ spin_lock_irqsave(&modlist_lock, flags);
+ for (mp = module_list; mp != NULL; mp = mp->next) {
+ if (mp->ex_table_start == NULL || !(mp->flags&(MOD_RUNNING|MOD_INITIALIZING)))
+ continue;
+ ret = search_one_table(mp->ex_table_start,
+ mp->ex_table_end - 1, addr);
+ if (ret)
+ break;
+ }
+ spin_unlock_irqrestore(&modlist_lock, flags);
+ return ret;
+#endif
+}
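search_exception_table() above resolves a faulting instruction address to a fixup address by binary-searching the sorted __ex_table section (and each module's table when CONFIG_MODULES is set). The following standalone sketch, with an illustrative struct and sample data rather than the real exception table, shows the same search and is not part of the patch:

/*
 * Minimal sketch of the binary search used above, outside the kernel.
 * The struct and sample data are illustrative, not the real __ex_table.
 */
#include <stdio.h>

struct ex_entry { unsigned long insn, fixup; };

static unsigned long search(const struct ex_entry *first,
                            const struct ex_entry *last,
                            unsigned long value)
{
    while (first <= last) {
        const struct ex_entry *mid = first + (last - first) / 2;
        if (mid->insn == value)
            return mid->fixup;
        if (mid->insn < value)
            first = mid + 1;
        else
            last = mid - 1;
    }
    return 0;   /* no fixup registered for this address */
}

int main(void)
{
    /* entries must be sorted by insn, as the linker script guarantees */
    struct ex_entry table[] = {
        { 0x1000, 0x9000 },
        { 0x1010, 0x9010 },
        { 0x1020, 0x9020 },
    };
    printf("%#lx\n", search(table, table + 2, 0x1010)); /* prints 0x9010 */
    return 0;
}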
diff --git a/old/xenolinux-2.4.16-sparse/arch/xeno/mm/fault.c b/old/xenolinux-2.4.16-sparse/arch/xeno/mm/fault.c
new file mode 100644
index 0000000000..41d966901a
--- /dev/null
+++ b/old/xenolinux-2.4.16-sparse/arch/xeno/mm/fault.c
@@ -0,0 +1,401 @@
+/*
+ * linux/arch/i386/mm/fault.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h> /* For unblank_screen() */
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+#include <asm/hardirq.h>
+
+extern void die(const char *,struct pt_regs *,long);
+
+extern int console_loglevel;
+
+pgd_t *cur_pgd;
+
+/*
+ * Ugly, ugly, but the goto's result in better assembly..
+ */
+int __verify_write(const void * addr, unsigned long size)
+{
+ struct vm_area_struct * vma;
+ unsigned long start = (unsigned long) addr;
+
+ if (!size)
+ return 1;
+
+ vma = find_vma(current->mm, start);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start > start)
+ goto check_stack;
+
+good_area:
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ size--;
+ size += start & ~PAGE_MASK;
+ size >>= PAGE_SHIFT;
+ start &= PAGE_MASK;
+
+ for (;;) {
+ survive:
+ {
+ int fault = handle_mm_fault(current->mm, vma, start, 1);
+ if (!fault)
+ goto bad_area;
+ if (fault < 0)
+ goto out_of_memory;
+ }
+ if (!size)
+ break;
+ size--;
+ start += PAGE_SIZE;
+ if (start < vma->vm_end)
+ continue;
+ vma = vma->vm_next;
+ if (!vma || vma->vm_start != start)
+ goto bad_area;
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ }
+ return 1;
+
+check_stack:
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (expand_stack(vma, start) == 0)
+ goto good_area;
+
+bad_area:
+ return 0;
+
+out_of_memory:
+ if (current->pid == 1) {
+ current->policy |= SCHED_YIELD;
+ schedule();
+ goto survive;
+ }
+ goto bad_area;
+}
+
+extern spinlock_t timerlist_lock;
+
+/*
+ * Unlock any spinlocks which will prevent us from getting the
+ * message out (timerlist_lock is acquired through the
+ * console unblank code)
+ */
+void bust_spinlocks(int yes)
+{
+ spin_lock_init(&timerlist_lock);
+ if (yes) {
+ oops_in_progress = 1;
+ } else {
+ int loglevel_save = console_loglevel;
+#ifdef CONFIG_VT
+ unblank_screen();
+#endif
+ oops_in_progress = 0;
+ /*
+ * OK, the message is on the console. Now we call printk()
+ * without oops_in_progress set so that printk will give klogd
+ * a poke. Hold onto your hats...
+ */
+ console_loglevel = 15; /* NMI oopser may have shut the console up */
+ printk(" ");
+ console_loglevel = loglevel_save;
+ }
+}
+
+void do_BUG(const char *file, int line)
+{
+ bust_spinlocks(1);
+ printk("kernel BUG at %s:%d!\n", file, line);
+}
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * error_code:
+ * bit 0 == 0 means no page found, 1 means protection fault
+ * bit 1 == 0 means read, 1 means write
+ * bit 2 == 0 means kernel, 1 means user-mode
+ */
+asmlinkage void do_page_fault(struct pt_regs *regs,
+ unsigned long error_code,
+ unsigned long address)
+{
+ struct task_struct *tsk = current;
+ struct mm_struct *mm;
+ struct vm_area_struct * vma;
+ unsigned long page;
+ unsigned long fixup;
+ int write;
+ siginfo_t info;
+
+ /* Set the "privileged fault" bit to something sane. */
+ error_code &= 3;
+ error_code |= (regs->xcs & 2) << 1;
+
+#if PT_UPDATE_DEBUG > 0
+ if ( (error_code == 0) && (address >= TASK_SIZE) )
+ {
+ unsigned long paddr = __pa(address);
+ int i;
+ for ( i = 0; i < pt_update_queue_idx; i++ )
+ {
+ if ( update_debug_queue[i].ptr == paddr )
+ {
+ printk("XXX now(EIP=%08lx:ptr=%08lx) "
+ "then(%s/%d:p/v=%08lx/%08lx)\n",
+ regs->eip, address,
+ update_debug_queue[i].file,
+ update_debug_queue[i].line,
+ update_debug_queue[i].ptr,
+ update_debug_queue[i].val);
+ }
+ }
+ }
+#endif
+
+ if ( flush_page_update_queue() != 0 ) return;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ *
+ * This verifies that the fault happens in kernel space
+ * (error_code & 4) == 0, and that the fault was not a
+ * protection error (error_code & 1) == 0.
+ */
+ if (address >= TASK_SIZE && !(error_code & 5))
+ goto vmalloc_fault;
+
+ mm = tsk->mm;
+ info.si_code = SEGV_MAPERR;
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_interrupt() || !mm)
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (error_code & 4) {
+ /*
+ * accessing the stack below %esp is always a bug.
+ * The "+ 32" is there due to some instructions (like
+ * pusha) doing post-decrement on the stack and that
+ * doesn't show up until later..
+ */
+ if (address + 32 < regs->esp)
+ goto bad_area;
+ }
+ if (expand_stack(vma, address))
+ goto bad_area;
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ info.si_code = SEGV_ACCERR;
+ write = 0;
+ switch (error_code & 3) {
+ default: /* 3: write, present */
+ /* fall through */
+ case 2: /* write, not present */
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ write++;
+ break;
+ case 1: /* read, present */
+ goto bad_area;
+ case 0: /* read, not present */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+
+ survive:
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ switch (handle_mm_fault(mm, vma, address, write)) {
+ case 1:
+ tsk->min_flt++;
+ break;
+ case 2:
+ tsk->maj_flt++;
+ break;
+ case 0:
+ goto do_sigbus;
+ default:
+ goto out_of_memory;
+ }
+
+ up_read(&mm->mmap_sem);
+ return;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+
+ /* User mode accesses just cause a SIGSEGV */
+ if (error_code & 4) {
+ tsk->thread.cr2 = address;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 14;
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void *)address;
+ force_sig_info(SIGSEGV, &info, tsk);
+ return;
+ }
+
+no_context:
+ /* Are we prepared to handle this kernel fault? */
+ if ((fixup = search_exception_table(regs->eip)) != 0) {
+ regs->eip = fixup;
+ return;
+ }
+
+/*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+
+ bust_spinlocks(1);
+
+ if (address < PAGE_SIZE)
+ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+ else
+ printk(KERN_ALERT "Unable to handle kernel paging request");
+ printk(" at virtual address %08lx\n",address);
+ printk(" printing eip:\n");
+ printk("%08lx\n", regs->eip);
+ page = ((unsigned long *) cur_pgd)[address >> 22];
+ printk(KERN_ALERT "*pde=%08lx(%08lx)\n", page, machine_to_phys(page));
+ if (page & 1) {
+ page &= PAGE_MASK;
+ address &= 0x003ff000;
+ page = machine_to_phys(page);
+ page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
+ printk(KERN_ALERT "*pte=%08lx(%08lx)\n", page,
+ machine_to_phys(page));
+ }
+ die("Oops", regs, error_code);
+ bust_spinlocks(0);
+ do_exit(SIGKILL);
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+ up_read(&mm->mmap_sem);
+ if (tsk->pid == 1) {
+ tsk->policy |= SCHED_YIELD;
+ schedule();
+ down_read(&mm->mmap_sem);
+ goto survive;
+ }
+ printk("VM: killing process %s\n", tsk->comm);
+ if (error_code & 4)
+ do_exit(SIGKILL);
+ goto no_context;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ tsk->thread.cr2 = address;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 14;
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void *)address;
+ force_sig_info(SIGBUS, &info, tsk);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!(error_code & 4))
+ goto no_context;
+ return;
+
+vmalloc_fault:
+ {
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "tsk" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ int offset = __pgd_offset(address);
+ pgd_t *pgd, *pgd_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+ pgd = offset + cur_pgd;
+ pgd_k = init_mm.pgd + offset;
+
+ if (!pgd_present(*pgd_k))
+ goto no_context;
+ set_pgd(pgd, *pgd_k);
+
+ pmd = pmd_offset(pgd, address);
+ pmd_k = pmd_offset(pgd_k, address);
+ if (!pmd_present(*pmd_k))
+ goto no_context;
+ set_pmd(pmd, *pmd_k);
+ XENO_flush_page_update_queue(); /* flush PMD update */
+
+ pte_k = pte_offset(pmd_k, address);
+ if (!pte_present(*pte_k))
+ goto no_context;
+ return;
+ }
+}
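The error_code handling in do_page_fault() follows the x86 convention spelled out in the comment above: bit 0 distinguishes a protection fault from a missing page, bit 1 a write from a read, and bit 2 a user-mode from a kernel-mode access (here derived from regs->xcs, since the guest kernel does not run in ring 0). A small illustrative decoder, not part of the patch:

/* Illustrative decoder for the x86 page-fault error code bits
 * documented in do_page_fault() above. */
#include <stdio.h>

static void decode_fault(unsigned long error_code)
{
    printf("%s, %s, %s mode\n",
           (error_code & 1) ? "protection fault" : "page not present",
           (error_code & 2) ? "write"            : "read",
           (error_code & 4) ? "user"             : "kernel");
}

int main(void)
{
    decode_fault(6);  /* write to a not-present page from user mode */
    return 0;
}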
diff --git a/old/xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c b/old/xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c
new file mode 100644
index 0000000000..a7b4447589
--- /dev/null
+++ b/old/xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c
@@ -0,0 +1,137 @@
+
+#include <linux/slab.h>
+#include <linux/shm.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/swapctl.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/personality.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+
+/*
+static int direct_mapped(unsigned long addr)
+{
+ direct_mmap_node_t * node;
+ struct list_head * curr;
+ struct list_head * direct_list = &current->mm->context.direct_list;
+
+ curr = direct_list->next;
+ while(curr != direct_list){
+ node = list_entry(curr, direct_mmap_node_t, list);
+ if(node->addr == addr)
+ break;
+ curr = curr->next;
+ }
+
+ if(curr == direct_list)
+ return 0;
+
+ return 1;
+}
+*/
+/*
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct vm_area_struct *vma;
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(current->mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+ addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);
+
+ for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+
+ if ((!vma || addr + len <= vma->vm_start) && !direct_mapped(addr))
+ return addr;
+
+ addr = vma->vm_end;
+ }
+}
+*/
+struct list_head *find_direct(struct list_head *list, unsigned long addr)
+{
+ struct list_head * curr;
+ struct list_head * direct_list = &current->mm->context.direct_list;
+ direct_mmap_node_t * node;
+
+ for ( curr = direct_list->next; curr != direct_list; curr = curr->next )
+ {
+ node = list_entry(curr, direct_mmap_node_t, list);
+ if ( node->vm_start >= addr ) break;
+ }
+
+ return curr;
+}
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct vm_area_struct *vma;
+ direct_mmap_node_t * node;
+ struct list_head * curr;
+ struct list_head * direct_list = &current->mm->context.direct_list;
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ if ( addr )
+ {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(current->mm, addr);
+ curr = find_direct(direct_list, addr);
+ node = list_entry(curr, direct_mmap_node_t, list);
+ if ( (TASK_SIZE - len >= addr) &&
+ (!vma || addr + len <= vma->vm_start) &&
+ ((curr == direct_list) || addr + len <= node->vm_start) )
+ return addr;
+ }
+
+ addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);
+
+
+ /* Find the first VMA and the first direct_map node above addr */
+ vma = find_vma(current->mm, addr);
+ curr = find_direct(direct_list, addr);
+ node = list_entry(curr, direct_mmap_node_t, list);
+
+ for ( ; ; )
+ {
+ if ( TASK_SIZE - len < addr ) return -ENOMEM;
+
+ if ( vma && ((curr == direct_list) || (vma->vm_start < node->vm_start)))
+ {
+ /* Do we fit before VMA node? */
+ if ( addr + len <= vma->vm_start ) return addr;
+ addr = vma->vm_end;
+ vma = vma->vm_next;
+ }
+ else if ( curr != direct_list )
+ {
+ /* Do we fit before direct_map node? */
+ if ( addr + len <= node->vm_start) return addr;
+ addr = node->vm_end;
+ curr = curr->next;
+ node = list_entry(curr, direct_mmap_node_t, list);
+ }
+ else
+ {
+ /* !vma && curr == direct_list */
+ return addr;
+ }
+ }
+}
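arch_get_unmapped_area() above scans two sorted interval lists in step -- the ordinary VMA list and the direct-mapped region list kept in mm->context.direct_list -- always advancing past whichever region starts first until a hole of at least len bytes opens up. A simplified sketch of that merge-style gap search over plain arrays (region data and names are made up, not the kernel structures):

/* Simplified sketch of the two-list gap search above: regions must be
 * sorted by start address. */
#include <stdio.h>

struct region { unsigned long start, end; };

static unsigned long find_gap(const struct region *a, int na,
                              const struct region *b, int nb,
                              unsigned long addr, unsigned long len)
{
    int i = 0, j = 0;

    /* advance past regions that end at or below addr */
    while (i < na && a[i].end <= addr) i++;
    while (j < nb && b[j].end <= addr) j++;

    for (;;) {
        const struct region *next = NULL;
        if (i < na) next = &a[i];
        if (j < nb && (!next || b[j].start < next->start)) next = &b[j];

        if (!next || addr + len <= next->start)
            return addr;            /* gap found (or nothing above us) */

        addr = next->end;           /* skip past the blocking region */
        if (next == &a[i]) i++; else j++;
    }
}

int main(void)
{
    struct region vmas[]   = { { 0x1000, 0x3000 }, { 0x8000, 0x9000 } };
    struct region direct[] = { { 0x3000, 0x5000 } };
    printf("%#lx\n", find_gap(vmas, 2, direct, 1, 0x1000, 0x2000)); /* 0x5000 */
    return 0;
}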
diff --git a/old/xenolinux-2.4.16-sparse/arch/xeno/mm/hypervisor.c b/old/xenolinux-2.4.16-sparse/arch/xeno/mm/hypervisor.c
new file mode 100644
index 0000000000..b051684aa2
--- /dev/null
+++ b/old/xenolinux-2.4.16-sparse/arch/xeno/mm/hypervisor.c
@@ -0,0 +1,168 @@
+/******************************************************************************
+ * xeno/mm/hypervisor.c
+ *
+ * Update page tables via the hypervisor.
+ *
+ * Copyright (c) 2002, K A Fraser
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <asm/hypervisor.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#define QUEUE_SIZE 2048
+static page_update_request_t update_queue[QUEUE_SIZE];
+unsigned int pt_update_queue_idx = 0;
+#define idx pt_update_queue_idx
+
+#if PT_UPDATE_DEBUG > 0
+page_update_debug_t update_debug_queue[QUEUE_SIZE] = {{0}};
+#undef queue_l1_entry_update
+#undef queue_l2_entry_update
+static void DEBUG_allow_pt_reads(void)
+{
+ pte_t *pte;
+ page_update_request_t update;
+ int i;
+ for ( i = idx-1; i >= 0; i-- )
+ {
+ pte = update_debug_queue[i].ptep;
+ if ( pte == NULL ) continue;
+ update_debug_queue[i].ptep = NULL;
+ update.ptr = phys_to_machine(__pa(pte));
+ update.val = update_debug_queue[i].pteval;
+ HYPERVISOR_pt_update(&update, 1);
+ }
+}
+static void DEBUG_disallow_pt_read(unsigned long pa)
+{
+ pte_t *pte;
+ pmd_t *pmd;
+ pgd_t *pgd;
+ unsigned long pteval;
+ /*
+ * We may fault because of an already outstanding update.
+ * That's okay -- it'll get fixed up in the fault handler.
+ */
+ page_update_request_t update;
+ unsigned long va = (unsigned long)__va(pa);
+ pgd = pgd_offset_k(va);
+ pmd = pmd_offset(pgd, va);
+ pte = pte_offset(pmd, va);
+ update.ptr = phys_to_machine(__pa(pte));
+ pteval = *(unsigned long *)pte;
+ update.val = pteval & ~_PAGE_PRESENT;
+ HYPERVISOR_pt_update(&update, 1);
+ update_debug_queue[idx].ptep = pte;
+ update_debug_queue[idx].pteval = pteval;
+}
+#endif
+
+#if PT_UPDATE_DEBUG > 1
+#undef queue_pt_switch
+#undef queue_tlb_flush
+#undef queue_invlpg
+#undef queue_pgd_pin
+#undef queue_pgd_unpin
+#undef queue_pte_pin
+#undef queue_pte_unpin
+#endif
+
+
+/*
+ * This is the current pagetable base pointer, which is updated
+ * on context switch.
+ */
+unsigned long pt_baseptr;
+
+void _flush_page_update_queue(void)
+{
+ if ( idx == 0 ) return;
+#if PT_UPDATE_DEBUG > 1
+ printk("Flushing %d entries from pt update queue\n", idx);
+#endif
+#if PT_UPDATE_DEBUG > 0
+ DEBUG_allow_pt_reads();
+#endif
+ HYPERVISOR_pt_update(update_queue, idx);
+ idx = 0;
+}
+
+static void increment_index(void)
+{
+ if ( ++idx == QUEUE_SIZE ) _flush_page_update_queue();
+}
+
+void queue_l1_entry_update(unsigned long ptr, unsigned long val)
+{
+#if PT_UPDATE_DEBUG > 0
+ DEBUG_disallow_pt_read(ptr);
+#endif
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].val = val;
+ increment_index();
+}
+
+void queue_l2_entry_update(unsigned long ptr, unsigned long val)
+{
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].val = val;
+ increment_index();
+}
+
+void queue_pt_switch(unsigned long ptr)
+{
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
+ update_queue[idx].val = PGEXT_NEW_BASEPTR;
+ increment_index();
+}
+
+void queue_tlb_flush(void)
+{
+ update_queue[idx].ptr = PGREQ_EXTENDED_COMMAND;
+ update_queue[idx].val = PGEXT_TLB_FLUSH;
+ increment_index();
+}
+
+void queue_invlpg(unsigned long ptr)
+{
+ update_queue[idx].ptr = PGREQ_EXTENDED_COMMAND;
+ update_queue[idx].val = ptr & PAGE_MASK;
+ update_queue[idx].val |= PGEXT_INVLPG;
+ increment_index();
+}
+
+void queue_pgd_pin(unsigned long ptr)
+{
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
+ update_queue[idx].val = PGEXT_PIN_L2_TABLE;
+ increment_index();
+}
+
+void queue_pgd_unpin(unsigned long ptr)
+{
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
+ update_queue[idx].val = PGEXT_UNPIN_TABLE;
+ increment_index();
+}
+
+void queue_pte_pin(unsigned long ptr)
+{
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
+ update_queue[idx].val = PGEXT_PIN_L1_TABLE;
+ increment_index();
+}
+
+void queue_pte_unpin(unsigned long ptr)
+{
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
+ update_queue[idx].val = PGEXT_UNPIN_TABLE;
+ increment_index();
+}
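hypervisor.c batches page-table writes into update_queue and hands the whole batch to the hypervisor in a single HYPERVISOR_pt_update() call, either when the queue fills (increment_index) or when a caller flushes explicitly. A generic batch-and-flush sketch of the same pattern, with placeholder types and a printf standing in for the hypercall:

/* Generic batch-and-flush sketch mirroring the queue logic above.
 * The request type and the submit function are placeholders, not Xen's ABI. */
#include <stdio.h>

#define QUEUE_SIZE 4                      /* small, to show an automatic flush */

struct request { unsigned long ptr, val; };

static struct request queue[QUEUE_SIZE];
static unsigned int idx;

static void submit_batch(struct request *req, unsigned int count)
{
    /* stand-in for the hypercall that applies the whole batch */
    printf("flushing %u queued updates starting at %#lx\n", count, req[0].ptr);
}

static void flush_queue(void)
{
    if (idx == 0) return;
    submit_batch(queue, idx);
    idx = 0;
}

static void queue_update(unsigned long ptr, unsigned long val)
{
    queue[idx].ptr = ptr;
    queue[idx].val = val;
    if (++idx == QUEUE_SIZE)
        flush_queue();                    /* flush automatically when full */
}

int main(void)
{
    for (unsigned long i = 0; i < 6; i++)
        queue_update(0x1000 + i * 8, i);  /* queue six updates */
    flush_queue();                        /* flush the remainder explicitly */
    return 0;
}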
diff --git a/old/xenolinux-2.4.16-sparse/arch/xeno/mm/init.c b/old/xenolinux-2.4.16-sparse/arch/xeno/mm/init.c
new file mode 100644
index 0000000000..71b22ddcac
--- /dev/null
+++ b/old/xenolinux-2.4.16-sparse/arch/xeno/mm/init.c
@@ -0,0 +1,293 @@
+/*
+ * linux/arch/i386/mm/init.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#ifdef CONFIG_BLK_DEV_INITRD
+#include <linux/blk.h>
+#endif
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/dma.h>
+#include <asm/apic.h>
+#include <asm/tlb.h>
+
+mmu_gather_t mmu_gathers[NR_CPUS];
+unsigned long highstart_pfn, highend_pfn;
+static unsigned long totalram_pages;
+static unsigned long totalhigh_pages;
+
+int do_check_pgt_cache(int low, int high)
+{
+ int freed = 0;
+ if(pgtable_cache_size > high) {
+ do {
+ if (!QUICKLIST_EMPTY(pgd_quicklist)) {
+ free_pgd_slow(get_pgd_fast());
+ freed++;
+ }
+ if (!QUICKLIST_EMPTY(pte_quicklist)) {
+ pte_free_slow(pte_alloc_one_fast(NULL, 0));
+ freed++;
+ }
+ } while(pgtable_cache_size > low);
+ }
+ return freed;
+}
+
+void show_mem(void)
+{
+ int i, total = 0, reserved = 0;
+ int shared = 0, cached = 0;
+ int highmem = 0;
+
+ printk("Mem-info:\n");
+ show_free_areas();
+ printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
+ i = max_mapnr;
+ while (i-- > 0) {
+ total++;
+ if (PageHighMem(mem_map+i))
+ highmem++;
+ if (PageReserved(mem_map+i))
+ reserved++;
+ else if (PageSwapCache(mem_map+i))
+ cached++;
+ else if (page_count(mem_map+i))
+ shared += page_count(mem_map+i) - 1;
+ }
+ printk("%d pages of RAM\n", total);
+ printk("%d pages of HIGHMEM\n",highmem);
+ printk("%d reserved pages\n",reserved);
+ printk("%d pages shared\n",shared);
+ printk("%d pages swap cached\n",cached);
+ printk("%ld pages in page table cache\n",pgtable_cache_size);
+ show_buffers();
+}
+
+/* References to section boundaries */
+
+extern char _text, _etext, _edata, __bss_start, _end;
+extern char __init_begin, __init_end;
+
+static inline void set_pte_phys (unsigned long vaddr,
+ unsigned long phys, pgprot_t flags)
+{
+ pgprot_t prot;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = init_mm.pgd + __pgd_offset(vaddr);
+ if (pgd_none(*pgd)) {
+ printk("PAE BUG #00!\n");
+ return;
+ }
+ pmd = pmd_offset(pgd, vaddr);
+ if (pmd_none(*pmd)) {
+ printk("PAE BUG #01!\n");
+ return;
+ }
+ pte = pte_offset(pmd, vaddr);
+ if (pte_val(*pte))
+ pte_ERROR(*pte);
+ pgprot_val(prot) = pgprot_val(PAGE_KERNEL) | pgprot_val(flags);
+
+ /* We queue directly, avoiding hidden phys->machine translation. */
+ queue_l1_entry_update(__pa(pte), phys | pgprot_val(prot));
+
+ /*
+ * It's enough to flush this one mapping.
+ * (PGE mappings get flushed as well)
+ */
+ __flush_tlb_one(vaddr);
+}
+
+void __set_fixmap (enum fixed_addresses idx, unsigned long phys,
+ pgprot_t flags)
+{
+ unsigned long address = __fix_to_virt(idx);
+
+ if (idx >= __end_of_fixed_addresses) {
+ printk("Invalid __set_fixmap\n");
+ return;
+ }
+ set_pte_phys(address, phys, flags);
+}
+
+static void __init fixrange_init (unsigned long start,
+ unsigned long end, pgd_t *pgd_base)
+{
+ pgd_t *pgd, *kpgd;
+ pmd_t *pmd, *kpmd;
+ pte_t *pte, *kpte;
+ int i, j;
+ unsigned long vaddr;
+
+ vaddr = start;
+ i = __pgd_offset(vaddr);
+ j = __pmd_offset(vaddr);
+ pgd = pgd_base + i;
+
+ for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+ pmd = (pmd_t *)pgd;
+ for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
+ if (pmd_none(*pmd)) {
+ pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ clear_page(pte);
+ kpgd = pgd_offset_k((unsigned long)pte);
+ kpmd = pmd_offset(kpgd, (unsigned long)pte);
+ kpte = pte_offset(kpmd, (unsigned long)pte);
+ queue_l1_entry_update(__pa(kpte),
+ (*(unsigned long *)kpte)&~_PAGE_RW);
+ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
+ }
+ vaddr += PMD_SIZE;
+ }
+ j = 0;
+ }
+
+ XENO_flush_page_update_queue();
+}
+
+void __init paging_init(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+ unsigned int max_dma, high, low;
+ unsigned long vaddr;
+
+ max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+ low = max_low_pfn;
+ high = highend_pfn;
+
+ if (low < max_dma)
+ {
+ zones_size[ZONE_DMA] = low;
+ }
+ else
+ {
+ zones_size[ZONE_DMA] = max_dma;
+ zones_size[ZONE_NORMAL] = low - max_dma;
+ }
+ free_area_init(zones_size);
+
+ /*
+ * Fixed mappings, only the page table structure has to be created -
+ * mappings will be set by set_fixmap():
+ */
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+ fixrange_init(vaddr, HYPERVISOR_VIRT_START, init_mm.pgd);
+
+ /*
+ * XXX We do this conversion early, so that all other page tables
+ * will automatically get this mapping.
+ */
+ set_fixmap(FIX_BLKRING_BASE, start_info.blk_ring);
+}
+
+
+static inline int page_is_ram (unsigned long pagenr)
+{
+ return 1;
+}
+
+void __init mem_init(void)
+{
+ int codesize, reservedpages, datasize, initsize;
+ int tmp;
+
+ max_mapnr = num_physpages = max_low_pfn;
+ high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+
+ /* clear the zero-page */
+ memset(empty_zero_page, 0, PAGE_SIZE);
+
+ /* this will put all low memory onto the freelists */
+ totalram_pages += free_all_bootmem();
+
+ reservedpages = 0;
+ for (tmp = 0; tmp < max_low_pfn; tmp++)
+ /*
+ * Only count reserved RAM pages
+ */
+ if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
+ reservedpages++;
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ max_mapnr << (PAGE_SHIFT-10),
+ codesize >> 10,
+ reservedpages << (PAGE_SHIFT-10),
+ datasize >> 10,
+ initsize >> 10,
+ (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
+ );
+
+ boot_cpu_data.wp_works_ok = 1;
+}
+
+void free_initmem(void)
+{
+ unsigned long addr;
+
+ addr = (unsigned long)(&__init_begin);
+ for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
+ free_page(addr);
+ totalram_pages++;
+ }
+ printk ("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ if (start < end)
+ printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+ for (; start < end; start += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
+ free_page(start);
+ totalram_pages++;
+ }
+}
+#endif
+
+void si_meminfo(struct sysinfo *val)
+{
+ val->totalram = totalram_pages;
+ val->sharedram = 0;
+ val->freeram = nr_free_pages();
+ val->bufferram = atomic_read(&buffermem_pages);
+ val->totalhigh = totalhigh_pages;
+ val->freehigh = nr_free_highpages();
+ val->mem_unit = PAGE_SIZE;
+ return;
+}
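paging_init() above sizes ZONE_DMA and ZONE_NORMAL from max_low_pfn and the MAX_DMA_ADDRESS cut-off before calling free_area_init(). A small sketch of that split, assuming the usual 16MB ISA DMA limit on x86 and an example memory size:

/* Sketch of the DMA/normal zone split computed in paging_init() above;
 * the 16MB ISA DMA limit and the memory size are assumptions. */
#include <stdio.h>

#define PAGE_SHIFT  12
#define MAX_DMA_PFN ((16UL * 1024 * 1024) >> PAGE_SHIFT)

int main(void)
{
    unsigned long low = 32768;            /* e.g. 128MB of low memory, in pages */
    unsigned long zone_dma, zone_normal = 0;

    if (low < MAX_DMA_PFN) {
        zone_dma = low;                   /* everything fits below the DMA limit */
    } else {
        zone_dma = MAX_DMA_PFN;
        zone_normal = low - MAX_DMA_PFN;  /* the rest goes to ZONE_NORMAL */
    }
    printf("DMA=%lu pages, NORMAL=%lu pages\n", zone_dma, zone_normal);
    return 0;
}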
diff --git a/old/xenolinux-2.4.16-sparse/arch/xeno/mm/mmu_context.c b/old/xenolinux-2.4.16-sparse/arch/xeno/mm/mmu_context.c
new file mode 100644
index 0000000000..b8f41fb269
--- /dev/null
+++ b/old/xenolinux-2.4.16-sparse/arch/xeno/mm/mmu_context.c
@@ -0,0 +1,26 @@
+
+#include <linux/slab.h>
+#include <linux/list.h>
+
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ INIT_LIST_HEAD(&mm->context.direct_list);
+ return 0;
+}
+
+/* just free all elements of list identifying directly mapped areas */
+void destroy_context(struct mm_struct *mm)
+{
+ direct_mmap_node_t * node;
+ struct list_head * curr;
+ struct list_head * direct_list = &mm->context.direct_list;
+
+ curr = direct_list->next;
+ while(curr != direct_list){
+ node = list_entry(curr, direct_mmap_node_t, list);
+ curr = curr->next;
+ list_del(&node->list);
+ kfree(node);
+ }
+
+}
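destroy_context() above frees every direct_mmap_node_t while walking the list, which only works because the next pointer is saved before the current node is unlinked and freed. A standalone sketch of the same delete-while-iterating pattern on a plain singly linked list (not the kernel's <linux/list.h>):

/* Sketch of the delete-while-iterating pattern used in destroy_context():
 * grab the next pointer before freeing the current node. */
#include <stdlib.h>
#include <stdio.h>

struct node { struct node *next; int payload; };

static void destroy_all(struct node **head)
{
    struct node *curr = *head;
    while (curr) {
        struct node *next = curr->next;   /* save before freeing */
        free(curr);
        curr = next;
    }
    *head = NULL;
}

int main(void)
{
    struct node *head = NULL;
    for (int i = 0; i < 3; i++) {         /* build a small list */
        struct node *n = malloc(sizeof(*n));
        n->payload = i;
        n->next = head;
        head = n;
    }
    destroy_all(&head);
    printf("list destroyed: %s\n", head ? "no" : "yes");
    return 0;
}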