author    kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2006-11-22 10:11:36 +0000
committer kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2006-11-22 10:11:36 +0000
commit    531616bf98610766d45f9747f737db0c26c8207a (patch)
tree      b134d36e69e9baeb619015370a08c77e46e1efa4 /extras/mini-os/mm.c
parent    47186797ce247ec18bcf8c95a7384972cd4aa0f3 (diff)
[MINIOS] Refactored mm.c and sched.c. x86 arch-specific code moved to
arch/x86/mm.c and arch/x86/sched.c. Header files were also refactored:
arch-specific code moved to include/x86/arch_mm.h and include/x86/sched_mm.h.

Signed-off-by: Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
Signed-off-by: Grzegorz Milos <gm281@cam.ac.uk>
Diffstat (limited to 'extras/mini-os/mm.c')
-rw-r--r--  extras/mini-os/mm.c  376
1 file changed, 6 insertions(+), 370 deletions(-)
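In net terms, the generic mm.c keeps the page allocator and delegates all x86 page-table work to the new arch layer through two hooks, visible in the init_mm() hunks at the end of this diff. A minimal sketch of that interface, as implied by the call sites (the actual prototypes live in the refactored arch headers, which are not part of this diff):

    /* Sketch only: prototypes implied by the new call sites in init_mm().
     * The real declarations live in the refactored arch headers. */
    void arch_init_mm(unsigned long *start_pfn_p, unsigned long *max_pfn_p);
    void arch_init_demand_mapping_area(unsigned long max_pfn);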
diff --git a/extras/mini-os/mm.c b/extras/mini-os/mm.c
index 85f9e92216..a24a521cf6 100644
--- a/extras/mini-os/mm.c
+++ b/extras/mini-os/mm.c
@@ -48,10 +48,6 @@
#define DEBUG(_f, _a...) ((void)0)
#endif
-unsigned long *phys_to_machine_mapping;
-extern char *stack;
-extern void page_walk(unsigned long virt_addr);
-
/*********************
* ALLOCATION BITMAP
* One bit per page of memory. Bit set => page is allocated.
@@ -226,11 +222,11 @@ static void init_page_allocator(unsigned long min, unsigned long max)
/* All allocated by default. */
memset(alloc_bitmap, ~0, bitmap_size);
/* Free up the memory we've been given to play with. */
- map_free(min>>PAGE_SHIFT, range>>PAGE_SHIFT);
+ map_free(PHYS_PFN(min), range>>PAGE_SHIFT);
/* The buddy lists are addressed in high memory. */
- min += VIRT_START;
- max += VIRT_START;
+ min = (unsigned long) to_virt(min);
+ max = (unsigned long) to_virt(max);
while ( range != 0 )
{
@@ -297,7 +293,7 @@ unsigned long alloc_pages(int order)
free_head[i] = spare_ch;
}
- map_alloc(to_phys(alloc_ch)>>PAGE_SHIFT, 1<<order);
+ map_alloc(PHYS_PFN(to_phys(alloc_ch)), 1<<order);
return((unsigned long)alloc_ch);
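The two hunks above replace open-coded address arithmetic (>>PAGE_SHIFT, +VIRT_START) with the PHYS_PFN() and to_virt() helpers. Since the refactoring is behaviour-preserving, the helpers must reduce to the old expressions; a sketch of the assumed definitions:

    /* Assumed helper semantics, inferred from the substitutions above: */
    #define PHYS_PFN(x)  ((unsigned long)(x) >> PAGE_SHIFT)
    #define to_virt(x)   ((void *)((unsigned long)(x) + VIRT_START))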
@@ -365,350 +361,6 @@ void free_pages(void *pointer, int order)
}
-void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn,
- unsigned long offset, unsigned long level)
-{
- pgentry_t *tab = (pgentry_t *)start_info.pt_base;
- unsigned long pt_page = (unsigned long)pfn_to_virt(*pt_pfn);
- unsigned long prot_e, prot_t, pincmd;
- mmu_update_t mmu_updates[1];
- struct mmuext_op pin_request;
-
- DEBUG("Allocating new L%d pt frame for pt_pfn=%lx, "
- "prev_l_mfn=%lx, offset=%lx",
- level, *pt_pfn, prev_l_mfn, offset);
-
- /* We need to clear the page, otherwise we might fail to map it
- as a page table page */
- memset((unsigned long*)pfn_to_virt(*pt_pfn), 0, PAGE_SIZE);
-
- switch ( level )
- {
- case L1_FRAME:
- prot_e = L1_PROT;
- prot_t = L2_PROT;
- pincmd = MMUEXT_PIN_L1_TABLE;
- break;
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
- case L2_FRAME:
- prot_e = L2_PROT;
- prot_t = L3_PROT;
- pincmd = MMUEXT_PIN_L2_TABLE;
- break;
-#endif
-#if defined(__x86_64__)
- case L3_FRAME:
- prot_e = L3_PROT;
- prot_t = L4_PROT;
- pincmd = MMUEXT_PIN_L3_TABLE;
- break;
-#endif
- default:
- printk("new_pt_frame() called with invalid level number %d\n", level);
- do_exit();
- break;
- }
- /* Update the entry */
-#if defined(__x86_64__)
- tab = pte_to_virt(tab[l4_table_offset(pt_page)]);
- tab = pte_to_virt(tab[l3_table_offset(pt_page)]);
-#endif
-#if defined(CONFIG_X86_PAE)
- tab = pte_to_virt(tab[l3_table_offset(pt_page)]);
-#endif
-
- mmu_updates[0].ptr = ((pgentry_t)tab[l2_table_offset(pt_page)] & PAGE_MASK) +
- sizeof(pgentry_t) * l1_table_offset(pt_page);
- mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT |
- (prot_e & ~_PAGE_RW);
- if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
- {
- printk("PTE for new page table page could not be updated\n");
- do_exit();
- }
-
- /* Pin the page to provide correct protection */
- pin_request.cmd = pincmd;
- pin_request.arg1.mfn = pfn_to_mfn(*pt_pfn);
- if(HYPERVISOR_mmuext_op(&pin_request, 1, NULL, DOMID_SELF) < 0)
- {
- printk("ERROR: pinning failed\n");
- do_exit();
- }
-
- /* Now fill the new page table page with entries.
- Update the page directory as well. */
- mmu_updates[0].ptr = ((pgentry_t)prev_l_mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
- mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | prot_t;
- if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
- {
- printk("ERROR: mmu_update failed\n");
- do_exit();
- }
- *pt_pfn += 1;
-}
-
-/* Checks if a pagetable frame is needed (if weren't allocated by Xen) */
-static int need_pt_frame(unsigned long virt_address, int level)
-{
- unsigned long hyp_virt_start = HYPERVISOR_VIRT_START;
-#if defined(__x86_64__)
- unsigned long hyp_virt_end = HYPERVISOR_VIRT_END;
-#else
- unsigned long hyp_virt_end = 0xffffffff;
-#endif
-
- /* In general frames will _not_ be needed if they were already
- allocated to map the hypervisor into our VA space */
-#if defined(__x86_64__)
- if(level == L3_FRAME)
- {
- if(l4_table_offset(virt_address) >=
- l4_table_offset(hyp_virt_start) &&
- l4_table_offset(virt_address) <=
- l4_table_offset(hyp_virt_end))
- return 0;
- return 1;
- } else
-#endif
-
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
- if(level == L2_FRAME)
- {
-#if defined(__x86_64__)
- if(l4_table_offset(virt_address) >=
- l4_table_offset(hyp_virt_start) &&
- l4_table_offset(virt_address) <=
- l4_table_offset(hyp_virt_end))
-#endif
- if(l3_table_offset(virt_address) >=
- l3_table_offset(hyp_virt_start) &&
- l3_table_offset(virt_address) <=
- l3_table_offset(hyp_virt_end))
- return 0;
-
- return 1;
- } else
-#endif /* defined(__x86_64__) || defined(CONFIG_X86_PAE) */
-
- /* Always need l1 frames */
- if(level == L1_FRAME)
- return 1;
-
- printk("ERROR: Unknown frame level %d, hypervisor %llx,%llx\n",
- level, hyp_virt_start, hyp_virt_end);
- return -1;
-}
-
-void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
-{
- unsigned long start_address, end_address;
- unsigned long pfn_to_map, pt_pfn = *start_pfn;
- static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
- pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
- unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
- unsigned long offset;
- int count = 0;
-
- pfn_to_map = (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES;
-
- if (*max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START))
- {
- printk("WARNING: Mini-OS trying to use Xen virtual space. "
- "Truncating memory from %dMB to ",
- ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
- *max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE);
- printk("%dMB\n",
- ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
- }
-
- start_address = (unsigned long)pfn_to_virt(pfn_to_map);
- end_address = (unsigned long)pfn_to_virt(*max_pfn);
-
- /* We worked out the virtual memory range to map, now mapping loop */
- printk("Mapping memory range 0x%lx - 0x%lx\n", start_address, end_address);
-
- while(start_address < end_address)
- {
- tab = (pgentry_t *)start_info.pt_base;
- mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
-
-#if defined(__x86_64__)
- offset = l4_table_offset(start_address);
- /* Need new L3 pt frame */
- if(!(start_address & L3_MASK))
- if(need_pt_frame(start_address, L3_FRAME))
- new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
-
- page = tab[offset];
- mfn = pte_to_mfn(page);
- tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
-#endif
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
- offset = l3_table_offset(start_address);
- /* Need new L2 pt frame */
- if(!(start_address & L2_MASK))
- if(need_pt_frame(start_address, L2_FRAME))
- new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
-
- page = tab[offset];
- mfn = pte_to_mfn(page);
- tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
-#endif
- offset = l2_table_offset(start_address);
- /* Need new L1 pt frame */
- if(!(start_address & L1_MASK))
- if(need_pt_frame(start_address, L1_FRAME))
- new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
-
- page = tab[offset];
- mfn = pte_to_mfn(page);
- offset = l1_table_offset(start_address);
-
- mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
- mmu_updates[count].val = (pgentry_t)pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT;
- count++;
- if (count == L1_PAGETABLE_ENTRIES || pfn_to_map == *max_pfn)
- {
- if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0)
- {
- printk("PTE could not be updated\n");
- do_exit();
- }
- count = 0;
- }
- start_address += PAGE_SIZE;
- }
- *start_pfn = pt_pfn;
-}
-
-
-void mem_test(unsigned long *start_add, unsigned long *end_add)
-{
- unsigned long mask = 0x10000;
- unsigned long *pointer;
-
- for(pointer = start_add; pointer < end_add; pointer++)
- {
- if(!(((unsigned long)pointer) & 0xfffff))
- {
- printk("Writing to %lx\n", pointer);
- page_walk((unsigned long)pointer);
- }
- *pointer = (unsigned long)pointer & ~mask;
- }
-
- for(pointer = start_add; pointer < end_add; pointer++)
- {
- if(((unsigned long)pointer & ~mask) != *pointer)
- printk("Read error at 0x%lx. Read: 0x%lx, should read 0x%lx\n",
- (unsigned long)pointer,
- *pointer,
- ((unsigned long)pointer & ~mask));
- }
-
-}
-
-static pgentry_t *demand_map_pgt;
-static void *demand_map_area_start;
-
-static void init_demand_mapping_area(unsigned long max_pfn)
-{
- unsigned long mfn;
- pgentry_t *tab;
- unsigned long start_addr;
- unsigned long pt_pfn;
- unsigned offset;
-
- /* Round up to four megs. + 1024 rather than + 1023 since we want
- to be sure we don't end up in the same place we started. */
- max_pfn = (max_pfn + L1_PAGETABLE_ENTRIES) & ~(L1_PAGETABLE_ENTRIES - 1);
- if (max_pfn == 0 ||
- (unsigned long)pfn_to_virt(max_pfn + L1_PAGETABLE_ENTRIES) >=
- HYPERVISOR_VIRT_START) {
- printk("Too much memory; no room for demand map hole.\n");
- do_exit();
- }
-
- demand_map_area_start = pfn_to_virt(max_pfn);
- printk("Demand map pfns start at %lx (%p).\n", max_pfn,
- demand_map_area_start);
- start_addr = (unsigned long)demand_map_area_start;
-
- tab = (pgentry_t *)start_info.pt_base;
- mfn = virt_to_mfn(start_info.pt_base);
- pt_pfn = virt_to_pfn(alloc_page());
-
-#if defined(__x86_64__)
- offset = l4_table_offset(start_addr);
- if (!(tab[offset] & _PAGE_PRESENT)) {
- new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
- pt_pfn = virt_to_pfn(alloc_page());
- }
- ASSERT(tab[offset] & _PAGE_PRESENT);
- mfn = pte_to_mfn(tab[offset]);
- tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
-#endif
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
- offset = l3_table_offset(start_addr);
- if (!(tab[offset] & _PAGE_PRESENT)) {
- new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
- pt_pfn = virt_to_pfn(alloc_page());
- }
- ASSERT(tab[offset] & _PAGE_PRESENT);
- mfn = pte_to_mfn(tab[offset]);
- tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
-#endif
- offset = l2_table_offset(start_addr);
- if (tab[offset] & _PAGE_PRESENT) {
- printk("Demand map area already has a page table covering it?\n");
- BUG();
- }
- demand_map_pgt = pfn_to_virt(pt_pfn);
- new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
- ASSERT(tab[offset] & _PAGE_PRESENT);
-}
-
-void *map_frames(unsigned long *f, unsigned long n)
-{
- unsigned long x;
- unsigned long y = 0;
- mmu_update_t mmu_updates[16];
- int rc;
-
- if (n > 16) {
- printk("Tried to map too many (%ld) frames at once.\n", n);
- return NULL;
- }
-
- /* Find a run of n contiguous frames */
- for (x = 0; x <= 1024 - n; x += y + 1) {
- for (y = 0; y < n; y++)
- if (demand_map_pgt[x+y] & _PAGE_PRESENT)
- break;
- if (y == n)
- break;
- }
- if (y != n) {
- printk("Failed to map %ld frames!\n", n);
- return NULL;
- }
-
- /* Found it at x. Map it in. */
- for (y = 0; y < n; y++) {
- mmu_updates[y].ptr = virt_to_mach(&demand_map_pgt[x + y]);
- mmu_updates[y].val = (f[y] << PAGE_SHIFT) | L1_PROT;
- }
-
- rc = HYPERVISOR_mmu_update(mmu_updates, n, NULL, DOMID_SELF);
- if (rc < 0) {
- printk("Map %ld failed: %d.\n", n, rc);
- return NULL;
- } else {
- return (void *)(unsigned long)((unsigned long)demand_map_area_start +
- x * PAGE_SIZE);
- }
-}
void init_mm(void)
{
@@ -717,22 +369,7 @@ void init_mm(void)
printk("MM: Init\n");
- printk(" _text: %p\n", &_text);
- printk(" _etext: %p\n", &_etext);
- printk(" _edata: %p\n", &_edata);
- printk(" stack start: %p\n", &stack);
- printk(" _end: %p\n", &_end);
-
- /* First page follows page table pages and 3 more pages (store page etc) */
- start_pfn = PFN_UP(to_phys(start_info.pt_base)) +
- start_info.nr_pt_frames + 3;
- max_pfn = start_info.nr_pages;
-
- printk(" start_pfn: %lx\n", start_pfn);
- printk(" max_pfn: %lx\n", max_pfn);
-
- build_pagetable(&start_pfn, &max_pfn);
-
+ arch_init_mm(&start_pfn, &max_pfn);
/*
* now we can initialise the page allocator
*/
@@ -742,8 +379,7 @@ void init_mm(void)
init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn));
printk("MM: done\n");
- init_demand_mapping_area(max_pfn);
- printk("Initialised demand area.\n");
+ arch_init_demand_mapping_area(max_pfn);
}
void sanity_check(void)
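For reference, after this patch the generic init_mm() reduces to roughly the following. This is a reconstruction from the hunks above; the local declarations and any other lines elided by the diff context are assumed:

    void init_mm(void)
    {
        unsigned long start_pfn, max_pfn;   /* assumed: elided by the hunk */

        printk("MM: Init\n");

        /* All x86-specific page-table setup now lives in arch/x86/mm.c. */
        arch_init_mm(&start_pfn, &max_pfn);

        /*
         * now we can initialise the page allocator
         */
        init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn));
        printk("MM: done\n");

        arch_init_demand_mapping_area(max_pfn);
    }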