From 531616bf98610766d45f9747f737db0c26c8207a Mon Sep 17 00:00:00 2001
From: "kfraser@localhost.localdomain" <kfraser@localhost.localdomain>
Date: Wed, 22 Nov 2006 10:11:36 +0000
Subject: [MINIOS] Refactored mm.c and sched.c.

x86 arch-specific code was moved to arch/x86/mm.c and arch/x86/sched.c.
Header files were also refactored: arch-specific code was moved to
include/x86/arch_mm.h and include/x86/sched_mm.h.

Signed-off-by: Dietmar Hahn
Signed-off-by: Grzegorz Milos
---
 extras/mini-os/mm.c | 376 +---------------------------------------------------
 1 file changed, 6 insertions(+), 370 deletions(-)

(limited to 'extras/mini-os/mm.c')

diff --git a/extras/mini-os/mm.c b/extras/mini-os/mm.c
index 85f9e92216..a24a521cf6 100644
--- a/extras/mini-os/mm.c
+++ b/extras/mini-os/mm.c
@@ -48,10 +48,6 @@
 #define DEBUG(_f, _a...) ((void)0)
 #endif
 
-unsigned long *phys_to_machine_mapping;
-extern char *stack;
-extern void page_walk(unsigned long virt_addr);
-
 /*********************
  * ALLOCATION BITMAP
  *  One bit per page of memory. Bit set => page is allocated.
@@ -226,11 +222,11 @@ static void init_page_allocator(unsigned long min, unsigned long max)
     /* All allocated by default. */
     memset(alloc_bitmap, ~0, bitmap_size);
     /* Free up the memory we've been given to play with. */
-    map_free(min>>PAGE_SHIFT, range>>PAGE_SHIFT);
+    map_free(PHYS_PFN(min), range>>PAGE_SHIFT);
 
     /* The buddy lists are addressed in high memory. */
-    min += VIRT_START;
-    max += VIRT_START;
+    min = (unsigned long) to_virt(min);
+    max = (unsigned long) to_virt(max);
 
     while ( range != 0 )
     {
@@ -297,7 +293,7 @@ unsigned long alloc_pages(int order)
         free_head[i] = spare_ch;
     }
 
-    map_alloc(to_phys(alloc_ch)>>PAGE_SHIFT, 1<<order);
+    map_alloc(PHYS_PFN(to_phys(alloc_ch)), 1<<order);

[... lost in extraction: the remainder of this hunk, the next hunk's header,
and the removal of the new_pt_frame() helper; the recovered deletions resume
at the start of need_pt_frame() ...]

-static int need_pt_frame(unsigned long virt_address, int level)
-{
-    unsigned long hyp_virt_start = HYPERVISOR_VIRT_START;
-#if defined(__x86_64__)
-    unsigned long hyp_virt_end = HYPERVISOR_VIRT_END;
-#else
-    unsigned long hyp_virt_end = 0xffffffff;
-#endif
-
-#if defined(__x86_64__)
-    if(level == L3_FRAME)
-    {
-        if(l4_table_offset(virt_address) >=
-           l4_table_offset(hyp_virt_start) &&
-           l4_table_offset(virt_address) <=
-           l4_table_offset(hyp_virt_end))
-            return 0;
-        return 1;
-    } else
-#endif
-
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
-    if(level == L2_FRAME)
-    {
-#if defined(__x86_64__)
-        if(l4_table_offset(virt_address) >=
-           l4_table_offset(hyp_virt_start) &&
-           l4_table_offset(virt_address) <=
-           l4_table_offset(hyp_virt_end))
-#endif
-            if(l3_table_offset(virt_address) >=
-               l3_table_offset(hyp_virt_start) &&
-               l3_table_offset(virt_address) <=
-               l3_table_offset(hyp_virt_end))
-                return 0;
-
-        return 1;
-    } else
-#endif /* defined(__x86_64__) || defined(CONFIG_X86_PAE) */
-
-    /* Always need l1 frames */
-    if(level == L1_FRAME)
-        return 1;
-
-    printk("ERROR: Unknown frame level %d, hypervisor %llx,%llx\n",
-           level, hyp_virt_start, hyp_virt_end);
-    return -1;
-}
-
-void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
-{
-    unsigned long start_address, end_address;
-    unsigned long pfn_to_map, pt_pfn = *start_pfn;
-    static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
-    pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
-    unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
-    unsigned long offset;
-    int count = 0;
-
-    pfn_to_map = (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES;
-
-    if (*max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START))
-    {
-        printk("WARNING: Mini-OS trying to use Xen virtual space. "
" - "Truncating memory from %dMB to ", - ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20); - *max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE); - printk("%dMB\n", - ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20); - } - - start_address = (unsigned long)pfn_to_virt(pfn_to_map); - end_address = (unsigned long)pfn_to_virt(*max_pfn); - - /* We worked out the virtual memory range to map, now mapping loop */ - printk("Mapping memory range 0x%lx - 0x%lx\n", start_address, end_address); - - while(start_address < end_address) - { - tab = (pgentry_t *)start_info.pt_base; - mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base)); - -#if defined(__x86_64__) - offset = l4_table_offset(start_address); - /* Need new L3 pt frame */ - if(!(start_address & L3_MASK)) - if(need_pt_frame(start_address, L3_FRAME)) - new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME); - - page = tab[offset]; - mfn = pte_to_mfn(page); - tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT); -#endif -#if defined(__x86_64__) || defined(CONFIG_X86_PAE) - offset = l3_table_offset(start_address); - /* Need new L2 pt frame */ - if(!(start_address & L2_MASK)) - if(need_pt_frame(start_address, L2_FRAME)) - new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME); - - page = tab[offset]; - mfn = pte_to_mfn(page); - tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT); -#endif - offset = l2_table_offset(start_address); - /* Need new L1 pt frame */ - if(!(start_address & L1_MASK)) - if(need_pt_frame(start_address, L1_FRAME)) - new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME); - - page = tab[offset]; - mfn = pte_to_mfn(page); - offset = l1_table_offset(start_address); - - mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset; - mmu_updates[count].val = (pgentry_t)pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT; - count++; - if (count == L1_PAGETABLE_ENTRIES || pfn_to_map == *max_pfn) - { - if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0) - { - printk("PTE could not be updated\n"); - do_exit(); - } - count = 0; - } - start_address += PAGE_SIZE; - } - *start_pfn = pt_pfn; -} - - -void mem_test(unsigned long *start_add, unsigned long *end_add) -{ - unsigned long mask = 0x10000; - unsigned long *pointer; - - for(pointer = start_add; pointer < end_add; pointer++) - { - if(!(((unsigned long)pointer) & 0xfffff)) - { - printk("Writing to %lx\n", pointer); - page_walk((unsigned long)pointer); - } - *pointer = (unsigned long)pointer & ~mask; - } - - for(pointer = start_add; pointer < end_add; pointer++) - { - if(((unsigned long)pointer & ~mask) != *pointer) - printk("Read error at 0x%lx. Read: 0x%lx, should read 0x%lx\n", - (unsigned long)pointer, - *pointer, - ((unsigned long)pointer & ~mask)); - } - -} - -static pgentry_t *demand_map_pgt; -static void *demand_map_area_start; - -static void init_demand_mapping_area(unsigned long max_pfn) -{ - unsigned long mfn; - pgentry_t *tab; - unsigned long start_addr; - unsigned long pt_pfn; - unsigned offset; - - /* Round up to four megs. + 1024 rather than + 1023 since we want - to be sure we don't end up in the same place we started. 
-    max_pfn = (max_pfn + L1_PAGETABLE_ENTRIES) & ~(L1_PAGETABLE_ENTRIES - 1);
-    if (max_pfn == 0 ||
-        (unsigned long)pfn_to_virt(max_pfn + L1_PAGETABLE_ENTRIES) >=
-        HYPERVISOR_VIRT_START) {
-        printk("Too much memory; no room for demand map hole.\n");
-        do_exit();
-    }
-
-    demand_map_area_start = pfn_to_virt(max_pfn);
-    printk("Demand map pfns start at %lx (%p).\n", max_pfn,
-           demand_map_area_start);
-    start_addr = (unsigned long)demand_map_area_start;
-
-    tab = (pgentry_t *)start_info.pt_base;
-    mfn = virt_to_mfn(start_info.pt_base);
-    pt_pfn = virt_to_pfn(alloc_page());
-
-#if defined(__x86_64__)
-    offset = l4_table_offset(start_addr);
-    if (!(tab[offset] & _PAGE_PRESENT)) {
-        new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
-        pt_pfn = virt_to_pfn(alloc_page());
-    }
-    ASSERT(tab[offset] & _PAGE_PRESENT);
-    mfn = pte_to_mfn(tab[offset]);
-    tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
-#endif
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
-    offset = l3_table_offset(start_addr);
-    if (!(tab[offset] & _PAGE_PRESENT)) {
-        new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
-        pt_pfn = virt_to_pfn(alloc_page());
-    }
-    ASSERT(tab[offset] & _PAGE_PRESENT);
-    mfn = pte_to_mfn(tab[offset]);
-    tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
-#endif
-    offset = l2_table_offset(start_addr);
-    if (tab[offset] & _PAGE_PRESENT) {
-        printk("Demand map area already has a page table covering it?\n");
-        BUG();
-    }
-    demand_map_pgt = pfn_to_virt(pt_pfn);
-    new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
-    ASSERT(tab[offset] & _PAGE_PRESENT);
-}
-
-void *map_frames(unsigned long *f, unsigned long n)
-{
-    unsigned long x;
-    unsigned long y = 0;
-    mmu_update_t mmu_updates[16];
-    int rc;
-
-    if (n > 16) {
-        printk("Tried to map too many (%ld) frames at once.\n", n);
-        return NULL;
-    }
-
-    /* Find a run of n contiguous frames */
-    for (x = 0; x <= 1024 - n; x += y + 1) {
-        for (y = 0; y < n; y++)
-            if (demand_map_pgt[x+y] & _PAGE_PRESENT)
-                break;
-        if (y == n)
-            break;
-    }
-    if (y != n) {
-        printk("Failed to map %ld frames!\n", n);
-        return NULL;
-    }
-
-    /* Found it at x.  Map it in. */
-    for (y = 0; y < n; y++) {
-        mmu_updates[y].ptr = virt_to_mach(&demand_map_pgt[x + y]);
-        mmu_updates[y].val = (f[y] << PAGE_SHIFT) | L1_PROT;
-    }
-
-    rc = HYPERVISOR_mmu_update(mmu_updates, n, NULL, DOMID_SELF);
-    if (rc < 0) {
-        printk("Map %ld failed: %d.\n", n, rc);
-        return NULL;
-    } else {
-        return (void *)(unsigned long)((unsigned long)demand_map_area_start +
-                                       x * PAGE_SIZE);
-    }
-}
 
 void init_mm(void)
 {
@@ -717,22 +369,7 @@ void init_mm(void)
 
     printk("MM: Init\n");
 
-    printk("  _text:       %p\n", &_text);
-    printk("  _etext:      %p\n", &_etext);
-    printk("  _edata:      %p\n", &_edata);
-    printk("  stack start: %p\n", &stack);
-    printk("  _end:        %p\n", &_end);
-
-    /* First page follows page table pages and 3 more pages (store page etc) */
-    start_pfn = PFN_UP(to_phys(start_info.pt_base)) +
-                start_info.nr_pt_frames + 3;
-    max_pfn = start_info.nr_pages;
-
-    printk("  start_pfn:   %lx\n", start_pfn);
-    printk("  max_pfn:     %lx\n", max_pfn);
-
-    build_pagetable(&start_pfn, &max_pfn);
-
+    arch_init_mm(&start_pfn, &max_pfn);
     /*
      * now we can initialise the page allocator
      */
@@ -742,8 +379,7 @@ void init_mm(void)
     init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn));
     printk("MM: done\n");
 
-    init_demand_mapping_area(max_pfn);
-    printk("Initialised demand area.\n");
+    arch_init_demand_mapping_area(max_pfn);
 }
 
 void sanity_check(void)
--
cgit v1.2.3
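
For orientation after the move: init_mm() above now calls just two arch
hooks, arch_init_mm() and arch_init_demand_mapping_area(). Those two names
are the only interface details this patch shows; the sketch below is one
plausible shape for the new include/x86/arch_mm.h interface and its
arch/x86/mm.c side. Parameter names and helper prototypes are assumptions
for illustration, not code taken from the Xen tree.

    /* Hedged sketch -- not the actual arch/x86/mm.c added by this commit. */

    /* include/x86/arch_mm.h (assumed): hooks the generic init_mm() calls. */
    void arch_init_mm(unsigned long *start_pfn_p, unsigned long *max_pfn_p);
    void arch_init_demand_mapping_area(unsigned long max_pfn);

    /* Helpers deleted from mm.c above, which move with the arch code
     * (prototypes assumed): */
    extern void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn);
    extern void init_demand_mapping_area(unsigned long max_pfn);

    /* arch/x86/mm.c (assumed shape): thin wrappers around the moved code. */
    void arch_init_mm(unsigned long *start_pfn_p, unsigned long *max_pfn_p)
    {
        /* The pfn bookkeeping removed from init_mm() (start_info.pt_base,
         * nr_pt_frames + 3 reserved pages, nr_pages) would land here... */
        build_pagetable(start_pfn_p, max_pfn_p);  /* ...then build the PTs. */
    }

    void arch_init_demand_mapping_area(unsigned long max_pfn)
    {
        init_demand_mapping_area(max_pfn);
    }

The point of the split is that mm.c keeps only the portable buddy allocator
and allocation bitmap, while everything touching pgentry_t, the L1..L4
frame logic and HYPERVISOR_mmu_update() sits behind the arch_* interface,
so a new architecture only has to provide these hooks.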