From ddc87ed30206661209945c82d32b799e0fcbf6fe Mon Sep 17 00:00:00 2001
From: "kaf24@scramble.cl.cam.ac.uk" <kaf24@scramble.cl.cam.ac.uk>
Date: Thu, 1 Apr 2004 12:00:12 +0000
Subject: bitkeeper revision 1.825.3.23 (406c044cT7cMBER-dmrtu4_WyL1Jjg)

page_alloc.c: Fix nasty bug in Xen heap allocator.
---
 xen/common/page_alloc.c | 36 +++++++++++++++++++++++++-----------
 1 file changed, 25 insertions(+), 11 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index adef28ea32..0292a7eea9 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -56,6 +56,13 @@ static void map_alloc(unsigned long first_page, unsigned long nr_pages)
 {
     unsigned long start_off, end_off, curr_idx, end_idx;
 
+#ifndef NDEBUG
+    unsigned long i;
+    /* Check that the block isn't already allocated. */
+    for ( i = 0; i < nr_pages; i++ )
+        ASSERT(!allocated_in_map(first_page + i));
+#endif
+
     curr_idx  = first_page / PAGES_PER_MAPWORD;
     start_off = first_page & (PAGES_PER_MAPWORD-1);
     end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
@@ -78,6 +85,13 @@ static void map_free(unsigned long first_page, unsigned long nr_pages)
 {
     unsigned long start_off, end_off, curr_idx, end_idx;
 
+#ifndef NDEBUG
+    unsigned long i;
+    /* Check that the block isn't already freed. */
+    for ( i = 0; i < nr_pages; i++ )
+        ASSERT(allocated_in_map(first_page + i));
+#endif
+
     curr_idx  = first_page / PAGES_PER_MAPWORD;
     start_off = first_page & (PAGES_PER_MAPWORD-1);
     end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
@@ -227,7 +241,7 @@ void __init init_page_allocator(unsigned long min, unsigned long max)
     /* Allocate space for the allocation bitmap. */
     bitmap_size  = (max+1) >> (PAGE_SHIFT+3);
     bitmap_size  = round_pgup(bitmap_size);
-    alloc_bitmap = (unsigned long *)__va(min);
+    alloc_bitmap = (unsigned long *)phys_to_virt(min);
     min         += bitmap_size;
     range        = max - min;
 
@@ -240,6 +254,8 @@ void __init init_page_allocator(unsigned long min, unsigned long max)
     min += PAGE_OFFSET;
     max += PAGE_OFFSET;
 
+    printk("Initialising Xen allocator with %luMB memory\n", range >> 20);
+
     p         = min;
     remaining = range;
     while ( remaining != 0 )
@@ -315,7 +331,7 @@ retry:
         GUARD(spare_ch, i);
     }
 
-    map_alloc(__pa(alloc_ch)>>PAGE_SHIFT, 1<<order);
+    map_alloc(virt_to_phys(alloc_ch)>>PAGE_SHIFT, 1<<order);
-    unsigned long pagenr = __pa(p) >> PAGE_SHIFT;
+    unsigned long pfn = virt_to_phys((void *)p) >> PAGE_SHIFT;
 
     spin_lock_irqsave(&alloc_lock, flags);
 
 #ifdef MEMORY_GUARD
-    /* Check that the block isn't already freed. */
-    if ( !allocated_in_map(pagenr) )
-        BUG();
     /* Check that the block isn't already guarded. */
     if ( __put_user(1, (int*)p) )
         BUG();
@@ -364,7 +377,7 @@ void __free_pages(unsigned long p, int order)
     memset((void *)p, 0xaa, size);
 #endif
 
-    map_free(pagenr, 1<<order);
+    map_free(pfn, 1<<order);
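
The debug hunks above guard the allocation bitmap against double allocation and
double free by asserting on every page bit before it is flipped. The sketch below
is a minimal standalone illustration of that pattern, not the Xen source: the names
(TOY_PAGES, toy_map) are made up, and it sets bits one page at a time rather than
word-at-a-time as the real map_alloc()/map_free() do. Building it with assertions
enabled and freeing the same range twice trips the second assert, which is the
failure mode the patch makes visible.

/*
 * Standalone sketch of the bitmap sanity checks added to map_alloc()/map_free().
 * Illustrative names only; compile with assertions enabled (no -DNDEBUG).
 */
#include <assert.h>
#include <limits.h>

#define TOY_PAGES         1024
#define PAGES_PER_MAPWORD (sizeof(unsigned long) * CHAR_BIT)

static unsigned long toy_map[TOY_PAGES / PAGES_PER_MAPWORD];

/* One bit per page: set => allocated. */
static int allocated_in_map(unsigned long page)
{
    return (toy_map[page / PAGES_PER_MAPWORD] >>
            (page & (PAGES_PER_MAPWORD - 1))) & 1;
}

static void map_alloc(unsigned long first_page, unsigned long nr_pages)
{
    unsigned long i;
    /* Same idea as the patch: catch double allocation early. */
    for ( i = 0; i < nr_pages; i++ )
        assert(!allocated_in_map(first_page + i));
    for ( i = 0; i < nr_pages; i++ )
        toy_map[(first_page + i) / PAGES_PER_MAPWORD] |=
            1UL << ((first_page + i) & (PAGES_PER_MAPWORD - 1));
}

static void map_free(unsigned long first_page, unsigned long nr_pages)
{
    unsigned long i;
    /* Catch double free, or freeing a range that was never allocated. */
    for ( i = 0; i < nr_pages; i++ )
        assert(allocated_in_map(first_page + i));
    for ( i = 0; i < nr_pages; i++ )
        toy_map[(first_page + i) / PAGES_PER_MAPWORD] &=
            ~(1UL << ((first_page + i) & (PAGES_PER_MAPWORD - 1)));
}

int main(void)
{
    map_alloc(8, 4);
    map_free(8, 4);
    map_free(8, 4);   /* second free of the same range trips the assertion */
    return 0;
}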