author     kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>  2004-12-30 18:27:27 +0000
committer  kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>  2004-12-30 18:27:27 +0000
commit     49f6e16ab2c596ced7b4feee709bc01e71808431
tree       9319356c95c597ad7fe2560a0fe65fef9cb59fea
parent     b320f8524542f19d81f9cc73dad93e56bb9cc549
bitkeeper revision 1.1159.170.74 (41d4488f9fINTxUzyjoq2FnTn5AvMQ)
Improved memory bootstrapping: the boot-time allocator now takes e820 RAM holes into account.
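
In outline: the old scheme assumed all RAM below max_page was usable and
carved the DOM0 reservation and frame table out of one contiguous region,
which broke on machines whose e820 map contains holes above 1MB. The new
scheme brings up a bitmap-backed boot-time allocator, feeds it only genuine
E820_RAM ranges, satisfies the early alignment-sensitive allocations (DOM0
memory, frame table) from that bitmap, and finally hands all remaining free
pages to the binary buddy allocator. A condensed sketch of the new call
sequence, simplified from the xen/arch/x86/setup.c hunk below (error
handling and the i386/x86_64 module relocation are omitted):

    max_page = init_e820(e820_raw, e820_raw_nr);

    /* The allocation bitmap lives just past the Xen image. */
    heap_start = memguard_init(&_end);
    heap_start = __va(init_boot_allocator(__pa(heap_start)));

    /* Free only genuine E820_RAM ranges beyond the relocated modules. */
    for ( i = 0; i < e820.nr_map; i++ )
        if ( (e820.map[i].type == E820_RAM) &&
             ((e820.map[i].addr + e820.map[i].size) >= initial_images_end) )
            init_boot_pages((e820.map[i].addr < initial_images_end) ?
                            initial_images_end : e820.map[i].addr,
                            e820.map[i].addr + e820.map[i].size);

    /* Early aligned allocations come straight from the bitmap. */
    dom0_memory_start = alloc_boot_pages(opt_dom0_mem << 10, 4UL << 20);
    init_frametable();

    /* Everything still free migrates to the buddy allocator. */
    end_boot_allocator();
    init_xenheap_pages(__pa(heap_start), xenheap_phys_end);
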
-rw-r--r--  .rootkeys                     |   1
-rw-r--r--  xen/arch/x86/e820.c           |  11
-rw-r--r--  xen/arch/x86/memory.c         |  45
-rw-r--r--  xen/arch/x86/nmi.c            |   1
-rw-r--r--  xen/arch/x86/setup.c          | 108
-rw-r--r--  xen/arch/x86/x86_32/mm.c      |  23
-rw-r--r--  xen/common/memory.c           |  51
-rw-r--r--  xen/common/page_alloc.c       | 137
-rw-r--r--  xen/include/asm-x86/config.h  |  12
-rw-r--r--  xen/include/asm-x86/e820.h    |   4
-rw-r--r--  xen/include/asm-x86/mm.h      |   2
-rw-r--r--  xen/include/asm-x86/page.h    |   1
-rw-r--r--  xen/include/xen/lib.h         |   3
-rw-r--r--  xen/include/xen/mm.h          |   8
14 files changed, 214 insertions(+), 193 deletions(-)
diff --git a/.rootkeys b/.rootkeys
index 985b8402bf..b607089e0b 100644
--- a/.rootkeys
+++ b/.rootkeys
@@ -712,7 +712,6 @@
3ddb79bd9drcFPVxd4w2GPOIjLlXpA xen/common/kernel.c
3e4cd9d8LAAghUY0hNIK72uc2ch_Nw xen/common/keyhandler.c
3ddb79bduhSEZI8xa7IbGQCpap5y2A xen/common/lib.c
-3ddb79bdS39UXxUtZnaScie83-7VTQ xen/common/memory.c
41a61536SZbR6cj1ukWTb0DYU-vz9w xen/common/multicall.c
3ddb79bdD4SLmmdMD7yLW5HcUWucXw xen/common/page_alloc.c
3e54c38dkHAev597bPr71-hGzTdocg xen/common/perfc.c
diff --git a/xen/arch/x86/e820.c b/xen/arch/x86/e820.c
index 5ebc2324e7..63bc128c7e 100644
--- a/xen/arch/x86/e820.c
+++ b/xen/arch/x86/e820.c
@@ -27,12 +27,12 @@ static void __init add_memory_region(unsigned long long start,
#define E820_DEBUG 1
-static void __init print_memory_map(char *who)
+static void __init print_memory_map(void)
{
int i;
for (i = 0; i < e820.nr_map; i++) {
- printk(" %s: %016Lx - %016Lx ", who,
+ printk(" %016Lx - %016Lx ",
e820.map[i].addr,
e820.map[i].addr + e820.map[i].size);
switch (e820.map[i].type) {
@@ -305,19 +305,18 @@ static unsigned long __init find_max_pfn(void)
return max_pfn;
}
-static char * __init machine_specific_memory_setup(
+static void __init machine_specific_memory_setup(
struct e820entry *raw, int raw_nr)
{
char nr = (char)raw_nr;
- char *who = "Pseudo-e820";
sanitize_e820_map(raw, &nr);
(void)copy_e820_map(raw, nr);
- return who;
}
unsigned long init_e820(struct e820entry *raw, int raw_nr)
{
+ machine_specific_memory_setup(raw, raw_nr);
printk(KERN_INFO "Physical RAM map:\n");
- print_memory_map(machine_specific_memory_setup(raw, raw_nr));
+ print_memory_map();
return find_max_pfn();
}
diff --git a/xen/arch/x86/memory.c b/xen/arch/x86/memory.c
index 7cab825a4d..a540b06fca 100644
--- a/xen/arch/x86/memory.c
+++ b/xen/arch/x86/memory.c
@@ -86,6 +86,7 @@
#include <xen/config.h>
#include <xen/init.h>
+#include <xen/kernel.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sched.h>
@@ -140,9 +141,34 @@ static struct {
/* Private domain structs for DOMID_XEN and DOMID_IO. */
static struct domain *dom_xen, *dom_io;
+/* Frame table and its size in pages. */
+struct pfn_info *frame_table;
+unsigned long frame_table_size;
+unsigned long max_page;
+
+void __init init_frametable(void)
+{
+ unsigned long i, p;
+
+ frame_table = (struct pfn_info *)FRAMETABLE_VIRT_START;
+ frame_table_size = max_page * sizeof(struct pfn_info);
+ frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
+
+ for ( i = 0; i < frame_table_size; i += (4UL << 20) )
+ {
+ p = alloc_boot_pages(min(frame_table_size - i, 4UL << 20), 4UL << 20);
+ if ( p == 0 )
+ panic("Not enough memory for frame table\n");
+ idle_pg_table[(FRAMETABLE_VIRT_START + i) >> L2_PAGETABLE_SHIFT] =
+ mk_l2_pgentry(p | __PAGE_HYPERVISOR | _PAGE_PSE);
+ }
+
+ memset(frame_table, 0, frame_table_size);
+}
+
void arch_init_memory(void)
{
- unsigned long mfn;
+ unsigned long mfn, i;
/*
* We are rather picky about the layout of 'struct pfn_info'. The
@@ -185,13 +211,13 @@ void arch_init_memory(void)
dom_io->id = DOMID_IO;
/* M2P table is mappable read-only by privileged domains. */
- for ( mfn = virt_to_phys(&machine_to_phys_mapping[0<<20])>>PAGE_SHIFT;
- mfn < virt_to_phys(&machine_to_phys_mapping[1<<20])>>PAGE_SHIFT;
- mfn++ )
+ mfn = l2_pgentry_to_pagenr(
+ idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT]);
+ for ( i = 0; i < 1024; i++ )
{
- frame_table[mfn].count_info = PGC_allocated | 1;
- frame_table[mfn].u.inuse.type_info = PGT_gdt_page | 1; /* non-RW */
- frame_table[mfn].u.inuse.domain = dom_xen;
+ frame_table[mfn+i].count_info = PGC_allocated | 1;
+ frame_table[mfn+i].u.inuse.type_info = PGT_gdt_page | 1; /* non-RW */
+ frame_table[mfn+i].u.inuse.domain = dom_xen;
}
}
@@ -500,11 +526,10 @@ static int alloc_l2_table(struct pfn_info *page)
pl2e = map_domain_mem(page_nr << PAGE_SHIFT);
- for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ ) {
+ for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
if ( unlikely(!get_page_from_l2e(pl2e[i], page_nr, d, i)) )
goto fail;
- }
-
+
#if defined(__i386__)
/* Now we add our private high mappings. */
memcpy(&pl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
diff --git a/xen/arch/x86/nmi.c b/xen/arch/x86/nmi.c
index ed597aec0c..556a661b57 100644
--- a/xen/arch/x86/nmi.c
+++ b/xen/arch/x86/nmi.c
@@ -286,6 +286,7 @@ void nmi_watchdog_tick (struct xen_regs * regs)
if ( alert_counter[cpu] == 5*nmi_hz )
{
console_force_unlock();
+ printk("Watchdog timer detects that CPU%d is stuck!\n", cpu);
fatal_trap(TRAP_nmi, regs);
}
}
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 1b0cd8494c..6ef530c4e6 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -30,7 +30,9 @@ integer_param("dom0_mem", opt_dom0_mem);
* pfn_info table and allocation bitmap.
*/
static unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
+#if defined(__x86_64__)
integer_param("xenheap_megabytes", opt_xenheap_megabytes);
+#endif
/* opt_noht: If true, Hyperthreading is ignored. */
int opt_noht = 0;
@@ -461,15 +463,14 @@ static void __init start_of_day(void)
void __init __start_xen(multiboot_info_t *mbi)
{
- unsigned long max_page;
unsigned char *cmdline;
module_t *mod = (module_t *)__va(mbi->mods_addr);
void *heap_start;
- unsigned long max_mem;
+ unsigned long firsthole_start, nr_pages;
unsigned long dom0_memory_start, dom0_memory_end;
unsigned long initial_images_start, initial_images_end;
struct e820entry e820_raw[E820MAX];
- int e820_raw_nr = 0, bytes = 0;
+ int i, e820_raw_nr = 0, bytes = 0;
/* Parse the command-line options. */
if ( (mbi->flags & MBI_CMDLINE) && (mbi->cmdline != 0) )
@@ -490,12 +491,6 @@ void __init __start_xen(multiboot_info_t *mbi)
for ( ; ; ) ;
}
- if ( opt_xenheap_megabytes < 4 )
- {
- printk("FATAL ERROR: Xen heap is too small to safely continue!\n");
- for ( ; ; ) ;
- }
-
xenheap_phys_end = opt_xenheap_megabytes << 20;
if ( mbi->flags & MBI_MEMMAP )
@@ -518,9 +513,9 @@ void __init __start_xen(multiboot_info_t *mbi)
e820_raw[0].addr = 0;
e820_raw[0].size = mbi->mem_lower << 10;
e820_raw[0].type = E820_RAM;
- e820_raw[0].addr = 0x100000;
- e820_raw[0].size = mbi->mem_upper << 10;
- e820_raw[0].type = E820_RAM;
+ e820_raw[1].addr = 0x100000;
+ e820_raw[1].size = mbi->mem_upper << 10;
+ e820_raw[1].type = E820_RAM;
e820_raw_nr = 2;
}
else
@@ -529,76 +524,70 @@ void __init __start_xen(multiboot_info_t *mbi)
for ( ; ; ) ;
}
- max_mem = max_page = init_e820(e820_raw, e820_raw_nr);
- max_mem = max_page = (mbi->mem_upper+1024) >> (PAGE_SHIFT - 10);
+ max_page = init_e820(e820_raw, e820_raw_nr);
-#if defined(__i386__)
+ /* Find the first high-memory RAM hole. */
+ for ( i = 0; i < e820.nr_map; i++ )
+ if ( (e820.map[i].type == E820_RAM) &&
+ (e820.map[i].addr >= 0x100000) )
+ break;
+ firsthole_start = e820.map[i].addr + e820.map[i].size;
- initial_images_start = DIRECTMAP_PHYS_END;
+ /* Relocate the Multiboot modules. */
+ initial_images_start = xenheap_phys_end;
initial_images_end = initial_images_start +
(mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
- if ( initial_images_end > (max_page << PAGE_SHIFT) )
+ if ( initial_images_end > firsthole_start )
{
printk("Not enough memory to stash the DOM0 kernel image.\n");
for ( ; ; ) ;
}
+#if defined(__i386__)
memmove((void *)initial_images_start, /* use low mapping */
(void *)mod[0].mod_start, /* use low mapping */
mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
-
- if ( opt_xenheap_megabytes > XENHEAP_DEFAULT_MB )
- {
- printk("Xen heap size is limited to %dMB - you specified %dMB.\n",
- XENHEAP_DEFAULT_MB, opt_xenheap_megabytes);
- for ( ; ; ) ;
- }
-
- ASSERT((sizeof(struct pfn_info) << 20) <=
- (FRAMETABLE_VIRT_END - FRAMETABLE_VIRT_START));
-
- init_frametable((void *)FRAMETABLE_VIRT_START, max_page);
-
#elif defined(__x86_64__)
-
- init_frametable(__va(xenheap_phys_end), max_page);
-
- initial_images_start = __pa(frame_table) + frame_table_size;
- initial_images_end = initial_images_start +
- (mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
- if ( initial_images_end > (max_page << PAGE_SHIFT) )
- {
- printk("Not enough memory to stash the DOM0 kernel image.\n");
- for ( ; ; ) ;
- }
memmove(__va(initial_images_start),
__va(mod[0].mod_start),
mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
-
#endif
- dom0_memory_start = (initial_images_end + ((4<<20)-1)) & ~((4<<20)-1);
- dom0_memory_end = dom0_memory_start + (opt_dom0_mem << 10);
- dom0_memory_end = (dom0_memory_end + PAGE_SIZE - 1) & PAGE_MASK;
-
- /* Cheesy sanity check: enough memory for DOM0 allocation + some slack? */
- if ( (dom0_memory_end + (8<<20)) > (max_page << PAGE_SHIFT) )
+ /* Initialise boot-time allocator with all RAM situated after modules. */
+ heap_start = memguard_init(&_end);
+ heap_start = __va(init_boot_allocator(__pa(heap_start)));
+ nr_pages = 0;
+ for ( i = 0; i < e820.nr_map; i++ )
+ {
+ if ( e820.map[i].type != E820_RAM )
+ continue;
+ nr_pages += e820.map[i].size >> PAGE_SHIFT;
+ if ( (e820.map[i].addr + e820.map[i].size) >= initial_images_end )
+ init_boot_pages((e820.map[i].addr < initial_images_end) ?
+ initial_images_end : e820.map[i].addr,
+ e820.map[i].addr + e820.map[i].size);
+ }
+
+ printk("System RAM: %luMB (%lukB)\n",
+ nr_pages >> (20 - PAGE_SHIFT),
+ nr_pages << (PAGE_SHIFT - 10));
+
+ /* Allocate an aligned chunk of RAM for DOM0. */
+ dom0_memory_start = alloc_boot_pages(opt_dom0_mem << 10, 4UL << 20);
+ dom0_memory_end = dom0_memory_start + (opt_dom0_mem << 10);
+ if ( dom0_memory_start == 0 )
{
printk("Not enough memory for DOM0 memory reservation.\n");
for ( ; ; ) ;
}
- printk("Initialised %luMB memory (%lu pages) on a %luMB machine\n",
- max_page >> (20-PAGE_SHIFT), max_page,
- max_mem >> (20-PAGE_SHIFT));
+ init_frametable();
- heap_start = memguard_init(&_end);
- heap_start = __va(init_heap_allocator(__pa(heap_start), max_page));
-
- init_xenheap_pages(__pa(heap_start), xenheap_phys_end);
- printk("Xen heap size is %luKB\n",
- (xenheap_phys_end-__pa(heap_start))/1024 );
+ end_boot_allocator();
- init_domheap_pages(dom0_memory_end, max_page << PAGE_SHIFT);
+ init_xenheap_pages(__pa(heap_start), xenheap_phys_end);
+ printk("Xen heap: %luMB (%lukB)\n",
+ (xenheap_phys_end-__pa(heap_start)) >> 20,
+ (xenheap_phys_end-__pa(heap_start)) >> 10);
/* Initialise the slab allocator. */
xmem_cache_init();
@@ -644,8 +633,7 @@ void __init __start_xen(multiboot_info_t *mbi)
panic("Could not set up DOM0 guest OS\n");
/* The stash space for the initial kernel image can now be freed up. */
- init_domheap_pages(__pa(frame_table) + frame_table_size,
- dom0_memory_start);
+ init_domheap_pages(initial_images_start, initial_images_end);
scrub_heap_pages();
diff --git a/xen/arch/x86/x86_32/mm.c b/xen/arch/x86/x86_32/mm.c
index c22d527ac7..d532ea8e9c 100644
--- a/xen/arch/x86/x86_32/mm.c
+++ b/xen/arch/x86/x86_32/mm.c
@@ -57,14 +57,25 @@ void __set_fixmap(enum fixed_addresses idx,
void __init paging_init(void)
{
void *ioremap_pt;
- int i;
+ unsigned long v, l2e;
+ struct pfn_info *pg;
+
+ /* Allocate and map the machine-to-phys table. */
+ if ( (pg = alloc_domheap_pages(NULL, 10)) == NULL )
+ panic("Not enough memory to bootstrap Xen.\n");
+ idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] =
+ mk_l2_pgentry(page_to_phys(pg) | __PAGE_HYPERVISOR | _PAGE_PSE);
- /* Xen heap mappings can be GLOBAL. */
+ /* Xen 4MB mappings can all be GLOBAL. */
if ( cpu_has_pge )
{
- for ( i = 0; i < DIRECTMAP_PHYS_END; i += (1 << L2_PAGETABLE_SHIFT) )
- ((unsigned long *)idle_pg_table)
- [(i + PAGE_OFFSET) >> L2_PAGETABLE_SHIFT] |= _PAGE_GLOBAL;
+ for ( v = HYPERVISOR_VIRT_START; v; v += (1 << L2_PAGETABLE_SHIFT) )
+ {
+ l2e = l2_pgentry_val(idle_pg_table[v >> L2_PAGETABLE_SHIFT]);
+ if ( l2e & _PAGE_PSE )
+ l2e |= _PAGE_GLOBAL;
+ idle_pg_table[v >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(l2e);
+ }
}
/* Create page table for ioremap(). */
@@ -404,7 +415,7 @@ void *memguard_init(void *heap_start)
l1[j] = mk_l1_pgentry((i << L2_PAGETABLE_SHIFT) |
(j << L1_PAGETABLE_SHIFT) |
__PAGE_HYPERVISOR);
- idle_pg_table[i] = idle_pg_table[i + l2_table_offset(PAGE_OFFSET)] =
+ idle_pg_table[i + l2_table_offset(PAGE_OFFSET)] =
mk_l2_pgentry(virt_to_phys(l1) | __PAGE_HYPERVISOR);
}
diff --git a/xen/common/memory.c b/xen/common/memory.c
deleted file mode 100644
index 2dfdd10e07..0000000000
--- a/xen/common/memory.c
+++ /dev/null
@@ -1,51 +0,0 @@
-/******************************************************************************
- * memory.c
- *
- * Copyright (c) 2002-2004 K A Fraser
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <xen/config.h>
-#include <xen/init.h>
-#include <xen/lib.h>
-#include <xen/mm.h>
-#include <xen/sched.h>
-#include <xen/errno.h>
-#include <xen/perfc.h>
-#include <xen/irq.h>
-#include <asm/page.h>
-#include <asm/flushtlb.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/domain_page.h>
-
-/* Frame table and its size in pages. */
-struct pfn_info *frame_table;
-unsigned long frame_table_size;
-unsigned long max_page;
-
-void __init init_frametable(void *frametable_vstart, unsigned long nr_pages)
-{
- max_page = nr_pages;
- frame_table_size = nr_pages * sizeof(struct pfn_info);
- frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
- frame_table = frametable_vstart;
-
- if ( (__pa(frame_table) + frame_table_size) > (max_page << PAGE_SHIFT) )
- panic("Not enough memory for frame table - reduce Xen heap size?\n");
-
- memset(frame_table, 0, frame_table_size);
-}
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 7e7824219a..0127f52c4d 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -37,6 +37,9 @@
static char opt_badpage[100] = "";
string_param("badpage", opt_badpage);
+#define round_pgdown(_p) ((_p)&PAGE_MASK)
+#define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
+
/*********************
* ALLOCATION BITMAP
* One bit per page of memory. Bit set => page is allocated.
@@ -98,7 +101,7 @@ static void map_free(unsigned long first_page, unsigned long nr_pages)
ASSERT(allocated_in_map(first_page + i));
#endif
- curr_idx = first_page / PAGES_PER_MAPWORD;
+ curr_idx = first_page / PAGES_PER_MAPWORD;
start_off = first_page & (PAGES_PER_MAPWORD-1);
end_idx = (first_page + nr_pages) / PAGES_PER_MAPWORD;
end_off = (first_page + nr_pages) & (PAGES_PER_MAPWORD-1);
@@ -118,55 +121,36 @@ static void map_free(unsigned long first_page, unsigned long nr_pages)
/*************************
- * BINARY BUDDY ALLOCATOR
+ * BOOT-TIME ALLOCATOR
*/
-#define MEMZONE_XEN 0
-#define MEMZONE_DOM 1
-#define NR_ZONES 2
-
-/* Up to 2^10 pages can be allocated at once. */
-#define MIN_ORDER 0
-#define MAX_ORDER 10
-#define NR_ORDERS (MAX_ORDER - MIN_ORDER + 1)
-static struct list_head heap[NR_ZONES][NR_ORDERS];
-
-static unsigned long avail[NR_ZONES];
-
-#define round_pgdown(_p) ((_p)&PAGE_MASK)
-#define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
-
-static spinlock_t heap_lock = SPIN_LOCK_UNLOCKED;
-
-/* Initialise allocator to handle up to @max_pages. */
-unsigned long init_heap_allocator(
- unsigned long bitmap_start, unsigned long max_pages)
+/* Initialise allocator to handle up to @max_page pages. */
+unsigned long init_boot_allocator(unsigned long bitmap_start)
{
- int i, j;
- unsigned long bad_pfn;
- char *p;
-
- memset(avail, 0, sizeof(avail));
-
- for ( i = 0; i < NR_ZONES; i++ )
- for ( j = 0; j < NR_ORDERS; j++ )
- INIT_LIST_HEAD(&heap[i][j]);
-
bitmap_start = round_pgup(bitmap_start);
/* Allocate space for the allocation bitmap. */
- bitmap_size = max_pages / 8;
+ bitmap_size = max_page / 8;
bitmap_size = round_pgup(bitmap_size);
alloc_bitmap = (unsigned long *)phys_to_virt(bitmap_start);
/* All allocated by default. */
memset(alloc_bitmap, ~0, bitmap_size);
- /*
- * Process the bad-page list. Marking the page free in the bitmap will
- * indicate to init_heap_pages() that it should not be placed on the
- * buddy lists.
- */
+ return bitmap_start + bitmap_size;
+}
+
+void init_boot_pages(unsigned long ps, unsigned long pe)
+{
+ unsigned long bad_pfn;
+ char *p;
+
+ ps = round_pgup(ps);
+ pe = round_pgdown(pe);
+
+ map_free(ps >> PAGE_SHIFT, (pe - ps) >> PAGE_SHIFT);
+
+ /* Check new pages against the bad-page list. */
p = opt_badpage;
while ( *p != '\0' )
{
@@ -177,28 +161,87 @@ unsigned long init_heap_allocator(
else if ( *p != '\0' )
break;
- if ( (bad_pfn < max_pages) && allocated_in_map(bad_pfn) )
+ if ( (bad_pfn < (bitmap_size*8)) && !allocated_in_map(bad_pfn) )
{
printk("Marking page %08lx as bad\n", bad_pfn);
- map_free(bad_pfn, 1);
+ map_alloc(bad_pfn, 1);
}
}
+}
- return bitmap_start + bitmap_size;
+unsigned long alloc_boot_pages(unsigned long size, unsigned long align)
+{
+ unsigned long pg, i;
+
+ size = round_pgup(size) >> PAGE_SHIFT;
+ align = round_pgup(align) >> PAGE_SHIFT;
+
+ for ( pg = 0; (pg + size) < (bitmap_size*PAGES_PER_MAPWORD); pg += align )
+ {
+ for ( i = 0; i < size; i++ )
+ if ( allocated_in_map(pg + i) )
+ break;
+
+ if ( i == size )
+ {
+ map_alloc(pg, size);
+ return pg << PAGE_SHIFT;
+ }
+ }
+
+ return 0;
}
+
+/*************************
+ * BINARY BUDDY ALLOCATOR
+ */
+
+#define MEMZONE_XEN 0
+#define MEMZONE_DOM 1
+#define NR_ZONES 2
+
+/* Up to 2^10 pages can be allocated at once. */
+#define MIN_ORDER 0
+#define MAX_ORDER 10
+#define NR_ORDERS (MAX_ORDER - MIN_ORDER + 1)
+static struct list_head heap[NR_ZONES][NR_ORDERS];
+
+static unsigned long avail[NR_ZONES];
+
+static spinlock_t heap_lock = SPIN_LOCK_UNLOCKED;
+
+void end_boot_allocator(void)
+{
+ unsigned long i, j;
+ int curr_free = 0, next_free = 0;
+
+ memset(avail, 0, sizeof(avail));
+
+ for ( i = 0; i < NR_ZONES; i++ )
+ for ( j = 0; j < NR_ORDERS; j++ )
+ INIT_LIST_HEAD(&heap[i][j]);
+
+ /* Pages that are free now go to the domain sub-allocator. */
+ for ( i = 0; i < max_page; i++ )
+ {
+ curr_free = next_free;
+ next_free = !allocated_in_map(i+1);
+ if ( next_free )
+ map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
+ if ( curr_free )
+ free_heap_pages(MEMZONE_DOM, pfn_to_page(i), 0);
+ }
+}
+
/* Hand the specified arbitrary page range to the specified heap zone. */
void init_heap_pages(int zone, struct pfn_info *pg, unsigned long nr_pages)
{
- unsigned long i, pfn = page_to_pfn(pg);
+ unsigned long i;
- /* Process each page in turn, skipping bad pages. */
for ( i = 0; i < nr_pages; i++ )
- {
- if ( likely(allocated_in_map(pfn+i)) ) /* bad page? */
- free_heap_pages(zone, pg+i, 0);
- }
+ free_heap_pages(zone, pg+i, 0);
}
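
A note on the curr_free/next_free lookahead in end_boot_allocator() above:
free_heap_pages() coalesces a page with its buddy whenever the buddy's
bitmap bit reads as free, so a page must not still be marked free in the
bitmap while its neighbour is being released. The loop therefore pre-marks
page i+1 as allocated before releasing page i. A short trace, assuming
pages 4-6 are free and everything else is allocated:

    i=3: next_free=1 -> map_alloc(4);  curr_free=0, nothing freed
    i=4: next_free=1 -> map_alloc(5);  curr_free=1 -> free_heap_pages(page 4)
    i=5: next_free=1 -> map_alloc(6);  curr_free=1 -> free_heap_pages(page 5)
    i=6: next_free=0;                  curr_free=1 -> free_heap_pages(page 6)

Each page enters the heap exactly once, and free_heap_pages() never sees a
not-yet-transferred neighbour whose bit still reads as free.
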
diff --git a/xen/include/asm-x86/config.h b/xen/include/asm-x86/config.h
index edffdb1c3b..5166c3f484 100644
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -161,7 +161,7 @@ extern void __out_of_line_bug(int line) __attribute__((noreturn));
#elif defined(__i386__)
#define XENHEAP_DEFAULT_MB (12)
-#define DIRECTMAP_PHYS_END (40*1024*1024)
+#define DIRECTMAP_PHYS_END (12*1024*1024)
/* Hypervisor owns top 64MB of virtual address space. */
#define __HYPERVISOR_VIRT_START 0xFC000000
@@ -173,17 +173,19 @@ extern void __out_of_line_bug(int line) __attribute__((noreturn));
*/
#define RO_MPT_VIRT_START (HYPERVISOR_VIRT_START)
#define RO_MPT_VIRT_END (RO_MPT_VIRT_START + (4*1024*1024))
-/* The virtual addresses for the 40MB direct-map region. */
+/* Xen heap extends to end of 1:1 direct-mapped memory region. */
#define DIRECTMAP_VIRT_START (RO_MPT_VIRT_END)
#define DIRECTMAP_VIRT_END (DIRECTMAP_VIRT_START + DIRECTMAP_PHYS_END)
#define XENHEAP_VIRT_START (DIRECTMAP_VIRT_START)
-#define XENHEAP_VIRT_END (XENHEAP_VIRT_START + (XENHEAP_DEFAULT_MB<<20))
+#define XENHEAP_VIRT_END (DIRECTMAP_VIRT_END)
+/* Machine-to-phys conversion table. */
#define RDWR_MPT_VIRT_START (XENHEAP_VIRT_END)
#define RDWR_MPT_VIRT_END (RDWR_MPT_VIRT_START + (4*1024*1024))
+/* Variable-length page-frame information array. */
#define FRAMETABLE_VIRT_START (RDWR_MPT_VIRT_END)
-#define FRAMETABLE_VIRT_END (DIRECTMAP_VIRT_END)
+#define FRAMETABLE_VIRT_END (FRAMETABLE_VIRT_START + (24*1024*1024))
/* Next 4MB of virtual address space is used as a linear p.t. mapping. */
-#define LINEAR_PT_VIRT_START (DIRECTMAP_VIRT_END)
+#define LINEAR_PT_VIRT_START (FRAMETABLE_VIRT_END)
#define LINEAR_PT_VIRT_END (LINEAR_PT_VIRT_START + (4*1024*1024))
/* Next 4MB of virtual address space is used as a shadow linear p.t. map. */
#define SH_LINEAR_PT_VIRT_START (LINEAR_PT_VIRT_END)
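
For reference, the revised x86_32 hypervisor address map implied by the
constants above, taking __HYPERVISOR_VIRT_START = 0xFC000000 and the default
12MB Xen heap (derived values, not quoted from the source):

    FC000000 - FC400000   RO_MPT             (4MB, read-only M2P for guests)
    FC400000 - FD000000   DIRECTMAP/XENHEAP  (12MB 1:1 map)
    FD000000 - FD400000   RDWR_MPT           (4MB, writable M2P)
    FD400000 - FEC00000   FRAMETABLE         (24MB)
    FEC00000 - FF000000   LINEAR_PT          (4MB)
    FF000000 - FF400000   SH_LINEAR_PT       (4MB)
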
diff --git a/xen/include/asm-x86/e820.h b/xen/include/asm-x86/e820.h
index 0767642570..52d342b523 100644
--- a/xen/include/asm-x86/e820.h
+++ b/xen/include/asm-x86/e820.h
@@ -27,7 +27,7 @@ extern struct e820map e820;
#endif /*!__ASSEMBLY__*/
-#define PFN_DOWN(_p) ((_p)&PAGE_MASK)
-#define PFN_UP(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#endif /*__E820_HEADER*/
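
The old PFN_DOWN/PFN_UP definitions were copies of the page-rounding macros
and returned byte addresses rather than frame numbers; the fixed versions
convert an address to the enclosing or next page frame. With 4kB pages:

    PFN_DOWN(0x1800) == 1   /* frame containing byte 0x1800 */
    PFN_UP(0x1800)   == 2   /* first whole frame at or above 0x1800 */

(the old PFN_UP(0x1800) evaluated to 0x2000, a byte address).
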
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 87ffe1ecc1..529ecbfc5c 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -118,7 +118,7 @@ struct pfn_info
extern struct pfn_info *frame_table;
extern unsigned long frame_table_size;
extern unsigned long max_page;
-void init_frametable(void *frametable_vstart, unsigned long nr_pages);
+void init_frametable(void);
int alloc_page_type(struct pfn_info *page, unsigned int type);
void free_page_type(struct pfn_info *page, unsigned int type);
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index bf4011028c..c016f4b5e0 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -102,6 +102,7 @@ typedef struct { unsigned long pt_lo; } pagetable_t;
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define page_address(_p) (__va(((_p) - frame_table) << PAGE_SHIFT))
+#define pfn_to_page(_pfn) (frame_table + (_pfn))
#define phys_to_page(kaddr) (frame_table + ((kaddr) >> PAGE_SHIFT))
#define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT))
#define VALID_PAGE(page) ((page - frame_table) < max_mapnr)
diff --git a/xen/include/xen/lib.h b/xen/include/xen/lib.h
index bfbce710ae..6ce764b7fc 100644
--- a/xen/include/xen/lib.h
+++ b/xen/include/xen/lib.h
@@ -14,8 +14,7 @@
#define SWAP(_a, _b) \
do { typeof(_a) _t = (_a); (_a) = (_b); (_b) = _t; } while ( 0 )
-#define reserve_bootmem(_p,_l) \
-printk("Memory Reservation 0x%lx, %lu bytes\n", (_p), (_l))
+#define reserve_bootmem(_p,_l) ((void)0)
struct domain;
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 79a3f72e2d..21184b3a43 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -5,9 +5,13 @@
struct domain;
struct pfn_info;
+/* Boot-time allocator. Turns into generic allocator after bootstrap. */
+unsigned long init_boot_allocator(unsigned long bitmap_start);
+void init_boot_pages(unsigned long ps, unsigned long pe);
+unsigned long alloc_boot_pages(unsigned long size, unsigned long align);
+void end_boot_allocator(void);
+
/* Generic allocator. These functions are *not* interrupt-safe. */
-unsigned long init_heap_allocator(
- unsigned long bitmap_start, unsigned long max_pages);
void init_heap_pages(int zone, struct pfn_info *pg, unsigned long nr_pages);
struct pfn_info *alloc_heap_pages(int zone, int order);
void free_heap_pages(int zone, struct pfn_info *pg, int order);
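
As a closing illustration, here is a minimal self-contained user-space model
of the new boot allocator's first-fit bitmap search. This is hypothetical
code, not part of the patch: the names mirror the Xen functions, but the
bodies are simplified (fixed-size bitmap, no bad-page list, and page 0 is
never handed out, so a return value of 0 can double as failure):

    /* boot_alloc_demo.c -- hypothetical model of the bitmap boot allocator. */
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SHIFT     12
    #define PAGE_SIZE      (1UL << PAGE_SHIFT)
    #define MAX_PAGES      8192UL               /* model a 32MB machine */
    #define BITS_PER_WORD  (8 * sizeof(unsigned long))

    static unsigned long alloc_bitmap[MAX_PAGES / BITS_PER_WORD];

    static int allocated_in_map(unsigned long pfn)
    {
        return (alloc_bitmap[pfn / BITS_PER_WORD] >>
                (pfn % BITS_PER_WORD)) & 1;
    }

    static void map_alloc(unsigned long first, unsigned long nr)
    {
        for ( unsigned long i = first; i < first + nr; i++ )
            alloc_bitmap[i / BITS_PER_WORD] |= 1UL << (i % BITS_PER_WORD);
    }

    static void map_free(unsigned long first, unsigned long nr)
    {
        for ( unsigned long i = first; i < first + nr; i++ )
            alloc_bitmap[i / BITS_PER_WORD] &= ~(1UL << (i % BITS_PER_WORD));
    }

    /* Mark byte range [ps,pe) free; everything starts out allocated. */
    static void init_boot_pages(unsigned long ps, unsigned long pe)
    {
        map_free(ps >> PAGE_SHIFT, (pe - ps) >> PAGE_SHIFT);
    }

    /* First-fit scan for @size bytes at @align byte alignment. */
    static unsigned long alloc_boot_pages(unsigned long size, unsigned long align)
    {
        unsigned long pg, i;
        size  = (size  + PAGE_SIZE - 1) >> PAGE_SHIFT;
        align = (align + PAGE_SIZE - 1) >> PAGE_SHIFT;
        for ( pg = 0; (pg + size) <= MAX_PAGES; pg += align )
        {
            for ( i = 0; i < size; i++ )
                if ( allocated_in_map(pg + i) )
                    break;
            if ( i == size )
            {
                map_alloc(pg, size);
                return pg << PAGE_SHIFT;
            }
        }
        return 0;
    }

    int main(void)
    {
        memset(alloc_bitmap, ~0, sizeof(alloc_bitmap));
        init_boot_pages(0x0010000, 0x09F0000);   /* RAM below a hole   */
        init_boot_pages(0x1000000, 0x1400000);   /* RAM above the hole */
        /* Two 4MB-aligned 4MB chunks: the second lands above the hole. */
        printf("chunk 1 at %#lx\n", alloc_boot_pages(4UL << 20, 4UL << 20));
        printf("chunk 2 at %#lx\n", alloc_boot_pages(4UL << 20, 4UL << 20));
        return 0;
    }

On this made-up map the first chunk comes back at 0x400000 and the second at
0x1000000: the scan steps over the hole at [0x9F0000, 0x1000000) instead of
handing out non-RAM, which is exactly the property the patch adds to Xen's
memory bootstrapping.
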