author    kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2006-03-09 15:57:32 +0100
committer kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2006-03-09 15:57:32 +0100
commit    44fb8b72969a9009fab1944f25b7c27311f33dc5 (patch)
tree      1ee248400058bb177bad3208009493e9a6e44377 /extras
parent    0026c91c773c0d29b151b0be20b95a3fa2464071 (diff)
64-bit pagetable builder added to mm.c

Signed-off-by: Aravindh Puthiyaparambil <aravindh.puthiyaparambil@unisys.com>
Signed-off-by: Grzegorz Milos <gm281@cam.ac.uk>
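Editorial note (not part of the patch): the stand-alone sketch below mirrors the x86_64 paging constants and l*_table_offset() macros this change adds to include/mm.h, to show how build_pagetable() and page_walk() split a virtual address into one index per page-table level. The 512-entries-per-level values are an assumption that follows from the 9-bit index width implied by the shifts (12/21/30/39); the sample address is arbitrary, chosen to sit just above the x86_64 VIRT_START defined in mm.h.

#include <stdio.h>

/* Constants mirrored from the x86_64 branch of include/mm.h in this patch. */
#define L1_PAGETABLE_SHIFT      12
#define L2_PAGETABLE_SHIFT      21
#define L3_PAGETABLE_SHIFT      30
#define L4_PAGETABLE_SHIFT      39

/* 9 index bits per level (39-30, 30-21, 21-12), hence 512 entries each.
 * These entry counts are assumed; the hunk above does not show them. */
#define L1_PAGETABLE_ENTRIES    512
#define L2_PAGETABLE_ENTRIES    512
#define L3_PAGETABLE_ENTRIES    512
#define L4_PAGETABLE_ENTRIES    512

/* Same shape as the l*_table_offset() macros added to include/mm.h. */
#define l1_table_offset(_a) \
    (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
#define l2_table_offset(_a) \
    (((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
#define l3_table_offset(_a) \
    (((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
#define l4_table_offset(_a) \
    (((_a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))

int main(void)
{
    /* Arbitrary example address just above the x86_64 VIRT_START
     * (0xFFFFFFFF00000000UL) defined in include/mm.h. */
    unsigned long addr = 0xFFFFFFFF00400000UL;

    /* build_pagetable() derives one table index per level this way
     * before deciding whether a new pt frame is needed at that level. */
    printf("addr = 0x%lx\n", addr);
    printf("  L4 index = %lu\n", l4_table_offset(addr));
    printf("  L3 index = %lu\n", l3_table_offset(addr));
    printf("  L2 index = %lu\n", l2_table_offset(addr));
    printf("  L1 index = %lu\n", l1_table_offset(addr));
    return 0;
}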
Diffstat (limited to 'extras')
-rw-r--r--  extras/mini-os/Makefile        |   1
-rw-r--r--  extras/mini-os/domain_config   |   2
-rw-r--r--  extras/mini-os/include/lib.h   |   1
-rw-r--r--  extras/mini-os/include/mm.h    |  74
-rw-r--r--  extras/mini-os/include/os.h    |   2
-rw-r--r--  extras/mini-os/mm.c            | 262
-rw-r--r--  extras/mini-os/traps.c         |  24
7 files changed, 248 insertions(+), 118 deletions(-)
diff --git a/extras/mini-os/Makefile b/extras/mini-os/Makefile
index 65a43def94..044c99c63d 100644
--- a/extras/mini-os/Makefile
+++ b/extras/mini-os/Makefile
@@ -32,6 +32,7 @@ OBJS := $(TARGET_ARCH).o
OBJS += $(patsubst %.c,%.o,$(wildcard *.c))
OBJS += $(patsubst %.c,%.o,$(wildcard lib/*.c))
OBJS += $(patsubst %.c,%.o,$(wildcard xenbus/*.c))
+#OBJS += $(patsubst %.c,%.o,$(wildcard console/*.c))
HDRS := $(wildcard include/*.h)
HDRS += $(wildcard include/xen/*.h)
diff --git a/extras/mini-os/domain_config b/extras/mini-os/domain_config
index d6635c88f9..57f909ac5d 100644
--- a/extras/mini-os/domain_config
+++ b/extras/mini-os/domain_config
@@ -15,3 +15,5 @@ memory = 32
# A name for your domain. All domains must have different names.
name = "Mini-OS"
+
+on_crash = 'destroy'
diff --git a/extras/mini-os/include/lib.h b/extras/mini-os/include/lib.h
index f386cb8053..0bf458565c 100644
--- a/extras/mini-os/include/lib.h
+++ b/extras/mini-os/include/lib.h
@@ -57,6 +57,7 @@
#include <stdarg.h>
+
/* printing */
#define printk printf
#define kprintf printf
diff --git a/extras/mini-os/include/mm.h b/extras/mini-os/include/mm.h
index 613fe29dfa..f88c8be83e 100644
--- a/extras/mini-os/include/mm.h
+++ b/extras/mini-os/include/mm.h
@@ -25,18 +25,34 @@
#ifndef _MM_H_
#define _MM_H_
-#ifdef __i386__
+#if defined(__i386__)
#include <xen/arch-x86_32.h>
-#endif
-
-#ifdef __x86_64__
+#elif defined(__x86_64__)
#include <xen/arch-x86_64.h>
+#else
+#error "Unsupported architecture"
#endif
+#include <lib.h>
-#ifdef __x86_64__
+#define L1_FRAME 1
+#define L2_FRAME 2
+#define L3_FRAME 3
#define L1_PAGETABLE_SHIFT 12
+
+#if defined(__i386__)
+
+#define L2_PAGETABLE_SHIFT 22
+
+#define L1_PAGETABLE_ENTRIES 1024
+#define L2_PAGETABLE_ENTRIES 1024
+
+#define PADDR_BITS 32
+#define PADDR_MASK (~0UL)
+
+#elif defined(__x86_64__)
+
#define L2_PAGETABLE_SHIFT 21
#define L3_PAGETABLE_SHIFT 30
#define L4_PAGETABLE_SHIFT 39
@@ -52,29 +68,29 @@
#define PADDR_MASK ((1UL << PADDR_BITS)-1)
#define VADDR_MASK ((1UL << VADDR_BITS)-1)
-#define pte_to_mfn(_pte) (((_pte) & (PADDR_MASK&PAGE_MASK)) >> L1_PAGETABLE_SHIFT)
-
-#endif
-
-
-
-#ifdef __i386__
+/* Get physical address of page mapped by pte (paddr_t). */
+#define l1e_get_paddr(x) \
+ ((unsigned long)(((x) & (PADDR_MASK&PAGE_MASK))))
+#define l2e_get_paddr(x) \
+ ((unsigned long)(((x) & (PADDR_MASK&PAGE_MASK))))
+#define l3e_get_paddr(x) \
+ ((unsigned long)(((x) & (PADDR_MASK&PAGE_MASK))))
+#define l4e_get_paddr(x) \
+ ((unsigned long)(((x) & (PADDR_MASK&PAGE_MASK))))
-#define L1_PAGETABLE_SHIFT 12
-#define L2_PAGETABLE_SHIFT 22
+#define L2_MASK ((1UL << L3_PAGETABLE_SHIFT) - 1)
+#define L3_MASK ((1UL << L4_PAGETABLE_SHIFT) - 1)
-#define L1_PAGETABLE_ENTRIES 1024
-#define L2_PAGETABLE_ENTRIES 1024
-
-#elif defined(__x86_64__)
#endif
+#define L1_MASK ((1UL << L2_PAGETABLE_SHIFT) - 1)
+
/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(_a) \
(((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
#define l2_table_offset(_a) \
(((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
-#ifdef __x86_64__
+#if defined(__x86_64__)
#define l3_table_offset(_a) \
(((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
#define l4_table_offset(_a) \
@@ -92,8 +108,15 @@
#define _PAGE_PSE 0x080UL
#define _PAGE_GLOBAL 0x100UL
-#define L1_PROT (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED)
-#define L2_PROT (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_USER)
+#if defined(__i386__)
+#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
+#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+#elif defined(__x86_64__)
+#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
+#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+#endif
#define PAGE_SIZE (1UL << L1_PAGETABLE_SHIFT)
#define PAGE_SHIFT L1_PAGETABLE_SHIFT
@@ -124,9 +147,9 @@ static __inline__ unsigned long machine_to_phys(unsigned long machine)
return phys;
}
-#ifdef __x86_64__
+#if defined(__x86_64__)
#define VIRT_START 0xFFFFFFFF00000000UL
-#else
+#elif defined(__i386__)
#define VIRT_START 0xC0000000UL
#endif
@@ -136,6 +159,11 @@ static __inline__ unsigned long machine_to_phys(unsigned long machine)
#define virt_to_pfn(_virt) (PFN_DOWN(to_phys(_virt)))
#define mach_to_virt(_mach) (to_virt(machine_to_phys(_mach)))
#define mfn_to_virt(_mfn) (mach_to_virt(_mfn << PAGE_SHIFT))
+#define pfn_to_virt(_pfn) (to_virt(_pfn << PAGE_SHIFT))
+
+/* Pagetable walking. */
+#define pte_to_mfn(_pte) (((_pte) & (PADDR_MASK&PAGE_MASK)) >> L1_PAGETABLE_SHIFT)
+#define pte_to_virt(_pte) to_virt(mfn_to_pfn(pte_to_mfn(_pte)) << PAGE_SHIFT)
void init_mm(void);
unsigned long alloc_pages(int order);
diff --git a/extras/mini-os/include/os.h b/extras/mini-os/include/os.h
index 2fbb11c203..1ad51e99af 100644
--- a/extras/mini-os/include/os.h
+++ b/extras/mini-os/include/os.h
@@ -59,6 +59,8 @@ extern shared_info_t *HYPERVISOR_shared_info;
void trap_init(void);
+
+
/*
* The use of 'barrier' in the following reflects their use as local-lock
* operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
diff --git a/extras/mini-os/mm.c b/extras/mini-os/mm.c
index 700805723e..c26ae2817a 100644
--- a/extras/mini-os/mm.c
+++ b/extras/mini-os/mm.c
@@ -51,7 +51,8 @@
unsigned long *phys_to_machine_mapping;
extern char *stack;
extern char _text, _etext, _edata, _end;
-
+extern void do_exit(void);
+extern void page_walk(unsigned long virt_addr);
/*********************
* ALLOCATION BITMAP
@@ -64,7 +65,6 @@ static unsigned long *alloc_bitmap;
#define allocated_in_map(_pn) \
(alloc_bitmap[(_pn)/PAGES_PER_MAPWORD] & (1<<((_pn)&(PAGES_PER_MAPWORD-1))))
-
/*
* Hint regarding bitwise arithmetic in map_{alloc,free}:
* -(1<<n) sets all bits >= n.
@@ -208,7 +208,6 @@ static void init_page_allocator(unsigned long min, unsigned long max)
unsigned long range, bitmap_size;
chunk_head_t *ch;
chunk_tail_t *ct;
-
for ( i = 0; i < FREELIST_SIZE; i++ )
{
free_head[i] = &free_tail[i];
@@ -366,106 +365,181 @@ void free_pages(void *pointer, int order)
free_head[order] = freed_ch;
}
-void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
-{
- unsigned long pfn_to_map, pt_frame;
- unsigned long mach_ptd, max_mach_ptd;
- int count;
- unsigned long mach_pte, virt_pte;
- unsigned long *ptd = (unsigned long *)start_info.pt_base;
- mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
+
+
+void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn,
+ unsigned long offset, unsigned long level)
+{
+ unsigned long *tab = (unsigned long *)start_info.pt_base;
+ unsigned long pt_page = (unsigned long)pfn_to_virt(*pt_pfn);
+ unsigned long prot_e, prot_t, pincmd;
mmu_update_t mmu_updates[1];
struct mmuext_op pin_request;
- /* Firstly work out what is the first pfn that is not yet in page tables
- NB. Assuming that builder fills whole pt_frames (which it does at the
- moment)
- */
- pfn_to_map = (start_info.nr_pt_frames - 1) * L1_PAGETABLE_ENTRIES;
- DEBUG("start_pfn=%ld, first pfn_to_map %ld, max_pfn=%ld",
- *start_pfn, pfn_to_map, *max_pfn);
-
- /* Machine address of page table directory */
- mach_ptd = phys_to_machine(to_phys(start_info.pt_base));
- mach_ptd += sizeof(void *) *
- l2_table_offset((unsigned long)to_virt(PFN_PHYS(pfn_to_map)));
-
- max_mach_ptd = sizeof(void *) *
- l2_table_offset((unsigned long)to_virt(PFN_PHYS(*max_pfn)));
-
- /* Check that we are not trying to access Xen region */
- if(max_mach_ptd > sizeof(void *) * l2_table_offset(HYPERVISOR_VIRT_START))
+ DEBUG("Allocating new L%d pt frame for pt_pfn=%lx, "
+ "prev_l_mfn=%lx, offset=%lx\n",
+ level, *pt_pfn, prev_l_mfn, offset);
+
+ if (level == L1_FRAME)
{
- printk("WARNING: mini-os will not use all the memory supplied\n");
- max_mach_ptd = sizeof(void *) * l2_table_offset(HYPERVISOR_VIRT_START);
- *max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE);
+ prot_e = L1_PROT;
+ prot_t = L2_PROT;
+ pincmd = MMUEXT_PIN_L1_TABLE;
}
- max_mach_ptd += phys_to_machine(to_phys(start_info.pt_base));
- DEBUG("Max_mach_ptd 0x%lx", max_mach_ptd);
-
- pt_frame = *start_pfn;
- /* Should not happen - no empty, mapped pages */
- if(pt_frame >= pfn_to_map)
+#if (defined __x86_64__)
+ else if (level == L2_FRAME)
+ {
+ prot_e = L2_PROT;
+ prot_t = L3_PROT;
+ pincmd = MMUEXT_PIN_L2_TABLE;
+ }
+ else if (level == L3_FRAME)
+ {
+ prot_e = L3_PROT;
+ prot_t = L4_PROT;
+ pincmd = MMUEXT_PIN_L3_TABLE;
+ }
+#endif
+ else
+ {
+ printk("new_pt_frame() called with invalid level number %d\n", level);
+ do_exit();
+ }
+
+ /* Update the entry */
+#if (defined __x86_64__)
+ tab = pte_to_virt(tab[l4_table_offset(pt_page)]);
+ tab = pte_to_virt(tab[l3_table_offset(pt_page)]);
+#endif
+ mmu_updates[0].ptr = (tab[l2_table_offset(pt_page)] & PAGE_MASK) +
+ sizeof(void *)* l1_table_offset(pt_page);
+ mmu_updates[0].val = pfn_to_mfn(*pt_pfn) << PAGE_SHIFT |
+ (prot_e & ~_PAGE_RW);
+ if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
{
- printk("ERROR: Not even a single empty, mapped page\n");
- *(int*)0=0;
+ printk("PTE for new page table page could not be updated\n");
+ do_exit();
+ }
+
+ /* Pin the page to provide correct protection */
+ pin_request.cmd = pincmd;
+ pin_request.arg1.mfn = pfn_to_mfn(*pt_pfn);
+ if(HYPERVISOR_mmuext_op(&pin_request, 1, NULL, DOMID_SELF) < 0)
+ {
+ printk("ERROR: pinning failed\n");
+ do_exit();
+ }
+
+ /* Now fill the new page table page with entries.
+ Update the page directory as well. */
+ mmu_updates[0].ptr = (prev_l_mfn << PAGE_SHIFT) + sizeof(void *) * offset;
+ mmu_updates[0].val = pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | prot_t;
+ if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
+ {
+ printk("ERROR: mmu_update failed\n");
+ do_exit();
}
+
+ *pt_pfn += 1;
+}
+
+void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
+{
+ unsigned long start_address, end_address;
+ unsigned long pfn_to_map, pt_pfn = *start_pfn;
+ static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
+ unsigned long *tab = (unsigned long *)start_info.pt_base;
+ unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
+ unsigned long page, offset;
+ int count = 0;
+
+#if defined(__x86_64__)
+ pfn_to_map = (start_info.nr_pt_frames - 3) * L1_PAGETABLE_ENTRIES;
+#else
+ pfn_to_map = (start_info.nr_pt_frames - 1) * L1_PAGETABLE_ENTRIES;
+#endif
+ start_address = (unsigned long)pfn_to_virt(pfn_to_map);
+ end_address = (unsigned long)pfn_to_virt(*max_pfn);
- while(mach_ptd < max_mach_ptd)
+ /* We worked out the virtual memory range to map, now mapping loop */
+ printk("Mapping memory range 0x%lx - 0x%lx\n", start_address, end_address);
+
+ while(start_address < end_address)
{
- /* Correct protection needs to be set for the new page table frame */
- virt_pte = (unsigned long)to_virt(PFN_PHYS(pt_frame));
- mach_pte = ptd[l2_table_offset(virt_pte)] & ~(PAGE_SIZE-1);
- mach_pte += sizeof(void *) * l1_table_offset(virt_pte);
- DEBUG("New page table page: pfn=0x%lx, mfn=0x%lx, virt_pte=0x%lx, "
- "mach_pte=0x%lx", pt_frame, pfn_to_mfn(pt_frame),
- virt_pte, mach_pte);
-
- /* Update the entry */
- mmu_updates[0].ptr = mach_pte;
- mmu_updates[0].val = pfn_to_mfn(pt_frame) << PAGE_SHIFT |
- (L1_PROT & ~_PAGE_RW);
- if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
- {
- printk("PTE for new page table page could not be updated\n");
- *(int*)0=0;
- }
-
- /* Pin the page to provide correct protection */
- pin_request.cmd = MMUEXT_PIN_L1_TABLE;
- pin_request.arg1.mfn = pfn_to_mfn(pt_frame);
- if(HYPERVISOR_mmuext_op(&pin_request, 1, NULL, DOMID_SELF) < 0)
- {
- printk("ERROR: pinning failed\n");
- *(int*)0=0;
- }
+ tab = (unsigned long *)start_info.pt_base;
+ mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
+
+#if defined(__x86_64__)
+ offset = l4_table_offset(start_address);
+ /* Need new L3 pt frame */
+ if(!(start_address & L3_MASK))
+ new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
- /* Now fill the new page table page with entries.
- Update the page directory as well. */
- count = 0;
- mmu_updates[count].ptr = mach_ptd;
- mmu_updates[count].val = pfn_to_mfn(pt_frame) << PAGE_SHIFT |
- L2_PROT;
+ page = tab[offset];
+ mfn = pte_to_mfn(page);
+ tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
+ offset = l3_table_offset(start_address);
+ /* Need new L2 pt frame */
+ if(!(start_address & L2_MASK))
+ new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
+
+ page = tab[offset];
+ mfn = pte_to_mfn(page);
+ tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
+#endif
+ offset = l2_table_offset(start_address);
+ /* Need new L1 pt frame */
+ if(!(start_address & L1_MASK))
+ new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
+
+ page = tab[offset];
+ mfn = pte_to_mfn(page);
+ offset = l1_table_offset(start_address);
+
+ mmu_updates[count].ptr = (mfn << PAGE_SHIFT) + sizeof(void *) * offset;
+ mmu_updates[count].val =
+ pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT;
count++;
- mach_ptd += sizeof(void *);
- mach_pte = phys_to_machine(PFN_PHYS(pt_frame++));
-
- for(;count <= L1_PAGETABLE_ENTRIES && pfn_to_map <= *max_pfn; count++)
+ if (count == L1_PAGETABLE_ENTRIES || pfn_to_map == *max_pfn)
{
- mmu_updates[count].ptr = mach_pte;
- mmu_updates[count].val =
- pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT;
- if(count == 1) DEBUG("mach_pte 0x%lx", mach_pte);
- mach_pte += sizeof(void *);
+ if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0)
+ {
+ printk("PTE could not be updated\n");
+ do_exit();
+ }
+ count = 0;
}
- if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0)
- {
- printk("ERROR: mmu_update failed\n");
- *(int*)0=0;
+ start_address += PAGE_SIZE;
+ }
+
+ *start_pfn = pt_pfn;
+}
+
+
+void mem_test(unsigned long *start_add, unsigned long *end_add)
+{
+ unsigned long mask = 0x10000;
+ unsigned long *pointer;
+
+ for(pointer = start_add; pointer < end_add; pointer++)
+ {
+ if(!(((unsigned long)pointer) & 0xfffff))
+ {
+ printk("Writing to %lx\n", pointer);
+ page_walk((unsigned long)pointer);
}
- (*start_pfn)++;
+ *pointer = (unsigned long)pointer & ~mask;
+ }
+
+ for(pointer = start_add; pointer < end_add; pointer++)
+ {
+ if(((unsigned long)pointer & ~mask) != *pointer)
+ printk("Read error at 0x%lx. Read: 0x%lx, should read 0x%lx\n",
+ (unsigned long)pointer,
+ *pointer,
+ ((unsigned long)pointer & ~mask));
}
- *start_pfn = pt_frame;
}
void init_mm(void)
@@ -485,23 +559,21 @@ void init_mm(void)
phys_to_machine_mapping = (unsigned long *)start_info.mfn_list;
/* First page follows page table pages and 3 more pages (store page etc) */
- start_pfn = PFN_UP(to_phys(start_info.pt_base)) + start_info.nr_pt_frames + 3;
+ start_pfn = PFN_UP(to_phys(start_info.pt_base)) +
+ start_info.nr_pt_frames + 3;
max_pfn = start_info.nr_pages;
-
+
printk(" start_pfn: %lx\n", start_pfn);
printk(" max_pfn: %lx\n", max_pfn);
-
-#ifdef __i386__
build_pagetable(&start_pfn, &max_pfn);
-#endif
-
+
/*
* now we can initialise the page allocator
*/
printk("MM: Initialise page allocator for %lx(%lx)-%lx(%lx)\n",
(u_long)to_virt(PFN_PHYS(start_pfn)), PFN_PHYS(start_pfn),
(u_long)to_virt(PFN_PHYS(max_pfn)), PFN_PHYS(max_pfn));
- init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn));
+ init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn));
printk("MM: done\n");
}
diff --git a/extras/mini-os/traps.c b/extras/mini-os/traps.c
index 1aeffef56b..ad54e31c5f 100644
--- a/extras/mini-os/traps.c
+++ b/extras/mini-os/traps.c
@@ -70,6 +70,30 @@ DO_ERROR(12, "stack segment", stack_segment)
DO_ERROR_INFO(17, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, "machine check", machine_check)
+void page_walk(unsigned long virt_address)
+{
+ unsigned long *tab = (unsigned long *)start_info.pt_base;
+ unsigned long addr = virt_address, page;
+ printk("Pagetable walk from virt %lx, base %lx:\n", virt_address, start_info.pt_base);
+
+#if defined(__x86_64__)
+ page = tab[l4_table_offset(addr)];
+ tab = to_virt(mfn_to_pfn(pte_to_mfn(page)) << PAGE_SHIFT);
+ printk(" L4 = %p (%p) [offset = %lx]\n", page, tab, l4_table_offset(addr));
+
+ page = tab[l3_table_offset(addr)];
+ tab = to_virt(mfn_to_pfn(pte_to_mfn(page)) << PAGE_SHIFT);
+ printk(" L3 = %p (%p) [offset = %lx]\n", page, tab, l3_table_offset(addr));
+#endif
+ page = tab[l2_table_offset(addr)];
+ tab = to_virt(mfn_to_pfn(pte_to_mfn(page)) << PAGE_SHIFT);
+ printk(" L2 = %p (%p) [offset = %lx]\n", page, tab, l2_table_offset(addr));
+
+ page = tab[l1_table_offset(addr)];
+ printk(" L1 = %p (%p) [offset = %lx]\n", page, tab, l1_table_offset(addr));
+
+}
+
void do_page_fault(struct pt_regs *regs, unsigned long error_code,
unsigned long addr)
{