author     akw27@labyrinth.cl.cam.ac.uk <akw27@labyrinth.cl.cam.ac.uk>  2003-01-27 12:05:24 +0000
committer  akw27@labyrinth.cl.cam.ac.uk <akw27@labyrinth.cl.cam.ac.uk>  2003-01-27 12:05:24 +0000
commit     d5d7407045c7b2ba4ea8f0ffed5910184c6848db
tree       612c4078b56f159f992f5eb3d8c55da2d6566a22
parent     091f818b46daa9b41e359d2f79de286bb0a276a7
parent     76858e72974578745ce60257367fd34e21a08c30
bitkeeper revision 1.15.1.10 (3e352084u4K_4YmnX6nhqxaJQrO-hQ)
Merge labyrinth.cl.cam.ac.uk:/usr/groups/xeno/BK/xeno into labyrinth.cl.cam.ac.uk:/usr/groups/xeno/users/akw27/xeno
-rw-r--r--  .rootkeys                                    1
-rw-r--r--  xen-2.4.16/Makefile                          1
-rw-r--r--  xen-2.4.16/common/domain.c                  55
-rw-r--r--  xen-2.4.16/common/domain_page.c             67
-rw-r--r--  xen-2.4.16/common/memory.c                 141
-rw-r--r--  xen-2.4.16/include/asm-i386/domain_page.h   16
6 files changed, 194 insertions(+), 87 deletions(-)
diff --git a/.rootkeys b/.rootkeys
index 84ae52496a..206a24d82b 100644
--- a/.rootkeys
+++ b/.rootkeys
@@ -39,6 +39,7 @@
3ddb79bdrqnW93GR9gZk1OJe1qK-iQ xen-2.4.16/common/brlock.c
3ddb79bdLX_P6iB7ILiblRLWvebapg xen-2.4.16/common/dom0_ops.c
3ddb79bdYO5D8Av12NHqPeSviav7cg xen-2.4.16/common/domain.c
+3e32af9aRnYGl4GMOaDKp7JdfhOGhg xen-2.4.16/common/domain_page.c
3ddb79bdeyutmaXEfpQvvxj7eQ0fCw xen-2.4.16/common/event.c
3ddb79bd9drcFPVxd4w2GPOIjLlXpA xen-2.4.16/common/kernel.c
3ddb79bduhSEZI8xa7IbGQCpap5y2A xen-2.4.16/common/lib.c
diff --git a/xen-2.4.16/Makefile b/xen-2.4.16/Makefile
index d057dcf7e0..846c0e6f12 100644
--- a/xen-2.4.16/Makefile
+++ b/xen-2.4.16/Makefile
@@ -24,7 +24,6 @@ $(TARGET): make-links
$(MAKE) -C net
$(MAKE) -C drivers
$(MAKE) -C arch/$(ARCH)
- gzip -f -9 < $(TARGET) > $(TARGET).gz
make-links:
ln -sf xeno include/linux
diff --git a/xen-2.4.16/common/domain.c b/xen-2.4.16/common/domain.c
index ea14a5ea21..1128cdfa15 100644
--- a/xen-2.4.16/common/domain.c
+++ b/xen-2.4.16/common/domain.c
@@ -410,7 +410,7 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
unsigned int ft_size = 0;
start_info_t *virt_startinfo_address;
unsigned long long time;
- l2_pgentry_t *l2tab;
+ l2_pgentry_t *l2tab, *l2start;
l1_pgentry_t *l1tab = NULL;
struct pfn_info *page = NULL;
net_ring_t *net_ring;
@@ -465,7 +465,7 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
* filled in by now !!
*/
phys_l2tab = ALLOC_FRAME_FROM_DOMAIN();
- l2tab = map_domain_mem(phys_l2tab);
+ l2start = l2tab = map_domain_mem(phys_l2tab);
memcpy(l2tab, idle_pg_table[p->processor], PAGE_SIZE);
l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
mk_l2_pgentry(__pa(p->mm.perdomain_pt) | __PAGE_HYPERVISOR);
@@ -486,17 +486,16 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
if(dom == 0)
ft_size = frame_table_size;
- phys_l2tab += l2_table_offset(virt_load_address)*sizeof(l2_pgentry_t);
+ l2tab += l2_table_offset(virt_load_address);
for ( cur_address = start_address;
cur_address != (end_address + PAGE_SIZE + ft_size);
cur_address += PAGE_SIZE )
{
if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
{
+ if ( l1tab != NULL ) unmap_domain_mem(l1tab-1);
phys_l1tab = ALLOC_FRAME_FROM_DOMAIN();
- l2tab = map_domain_mem(phys_l2tab);
- *l2tab = mk_l2_pgentry(phys_l1tab|L2_PROT);
- phys_l2tab += sizeof(l2_pgentry_t);
+ *l2tab++ = mk_l2_pgentry(phys_l1tab|L2_PROT);
l1tab = map_domain_mem(phys_l1tab);
clear_page(l1tab);
l1tab += l1_table_offset(
@@ -512,43 +511,39 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
page->type_count = page->tot_count = 1;
}
}
+ unmap_domain_mem(l1tab-1);
/* Pages that are part of page tables must be read-only. */
vaddr = virt_load_address + alloc_address - start_address;
- phys_l2tab = pagetable_val(p->mm.pagetable) +
- (l2_table_offset(vaddr) * sizeof(l2_pgentry_t));
- l2tab = map_domain_mem(phys_l2tab);
- phys_l1tab = l2_pgentry_to_phys(*l2tab) +
- (l1_table_offset(vaddr) * sizeof(l1_pgentry_t));
- phys_l2tab += sizeof(l2_pgentry_t);
- l1tab = map_domain_mem(phys_l1tab);
+ l2tab = l2start + l2_table_offset(vaddr);
+ l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+ l1tab += l1_table_offset(vaddr);
+ l2tab++;
for ( cur_address = alloc_address;
cur_address != end_address;
cur_address += PAGE_SIZE )
{
- *l1tab++ = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
{
- l2tab = map_domain_mem(phys_l2tab);
- phys_l1tab = l2_pgentry_to_phys(*l2tab);
- phys_l2tab += sizeof(l2_pgentry_t);
- l1tab = map_domain_mem(phys_l1tab);
+ unmap_domain_mem(l1tab-1);
+ l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+ l2tab++;
}
+ *l1tab++ = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
page = frame_table + (cur_address >> PAGE_SHIFT);
page->flags = dom | PGT_l1_page_table;
page->tot_count++;
}
+ unmap_domain_mem(l1tab-1);
page->flags = dom | PGT_l2_page_table;
/* Map in the shared info structure. */
virt_shinfo_address = end_address - start_address + virt_load_address;
- phys_l2tab = pagetable_val(p->mm.pagetable) +
- (l2_table_offset(virt_shinfo_address) * sizeof(l2_pgentry_t));
- l2tab = map_domain_mem(phys_l2tab);
- phys_l1tab = l2_pgentry_to_phys(*l2tab) +
- (l1_table_offset(virt_shinfo_address) * sizeof(l1_pgentry_t));
- l1tab = map_domain_mem(phys_l1tab);
+ l2tab = l2start + l2_table_offset(virt_shinfo_address);
+ l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+ l1tab += l1_table_offset(virt_shinfo_address);
*l1tab = mk_l1_pgentry(__pa(p->shared_info)|L1_PROT);
+ unmap_domain_mem(l1tab);
/* Set up shared info area. */
rdtscll(time);
@@ -565,13 +560,11 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
cur_address < virt_ftable_end_addr;
cur_address += PAGE_SIZE)
{
- phys_l2tab = pagetable_val(p->mm.pagetable) +
- (l2_table_offset(cur_address) * sizeof(l2_pgentry_t));
- l2tab = map_domain_mem(phys_l2tab);
- phys_l1tab = l2_pgentry_to_phys(*l2tab) +
- (l1_table_offset(cur_address) * sizeof(l1_pgentry_t));
- l1tab = map_domain_mem(phys_l1tab);
+ l2tab = l2start + l2_table_offset(cur_address);
+ l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+ l1tab += l1_table_offset(cur_address);
*l1tab = mk_l1_pgentry(__pa(ft_mapping)|L1_PROT);
+ unmap_domain_mem(l1tab);
ft_mapping += PAGE_SIZE;
}
}
@@ -580,6 +573,8 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
(alloc_address - start_address - PAGE_SIZE + virt_load_address);
virt_stack_address = (unsigned long)virt_startinfo_address;
+ unmap_domain_mem(l2start);
+
/* Install the new page tables. */
__cli();
__write_cr3_counted(pagetable_val(p->mm.pagetable));
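
The setup_guestos() hunks above all follow one new rule: every map_domain_mem() is balanced by an unmap_domain_mem(), and the repeated phys_l2tab offset arithmetic is replaced by a single l2start pointer held for the whole function. The `l1tab-1` passed to unmap_domain_mem() is deliberate: l1tab is post-incremented, so once a table page is full the pointer has already stepped past its end, and backing up one entry yields a VA still inside the page being released. A minimal sketch of the idiom, in which next_table_frame() and make_entry() are hypothetical helpers:

    /* Sketch only: fill n PTEs, mapping one table page at a time and
     * keeping every map_domain_mem() balanced by an unmap_domain_mem(). */
    static void fill_tables(unsigned int n)
    {
        l1_pgentry_t *l1tab = NULL;
        unsigned int i;
        for ( i = 0; i < n; i++ )
        {
            if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
            {
                /* Crossed into a new table page: release the old one.
                 * 'l1tab-1' still points into the page just finished. */
                if ( l1tab != NULL ) unmap_domain_mem(l1tab-1);
                l1tab = map_domain_mem(next_table_frame());
            }
            *l1tab++ = make_entry(i);
        }
        if ( l1tab != NULL ) unmap_domain_mem(l1tab-1);
    }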
diff --git a/xen-2.4.16/common/domain_page.c b/xen-2.4.16/common/domain_page.c
new file mode 100644
index 0000000000..2e37b72c5c
--- /dev/null
+++ b/xen-2.4.16/common/domain_page.c
@@ -0,0 +1,67 @@
+/******************************************************************************
+ * domain_page.c
+ *
+ * Allow temporary mapping of domain pages. Based on ideas from the
+ * Linux PKMAP code -- the copyrights and credits are retained below.
+ */
+
+/*
+ * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
+ * Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ */
+
+#include <xeno/config.h>
+#include <xeno/sched.h>
+#include <xeno/mm.h>
+#include <asm/domain_page.h>
+#include <asm/pgalloc.h>
+
+static unsigned int map_idx[NR_CPUS];
+
+/* Use a spare PTE bit to mark entries ready for recycling. */
+#define READY_FOR_TLB_FLUSH (1<<10)
+
+static void flush_all_ready_maps(void)
+{
+ unsigned long *cache = mapcache[smp_processor_id()];
+
+ /* A bit skanky -- depends on having an aligned PAGE_SIZE set of PTEs. */
+ do { if ( (*cache & READY_FOR_TLB_FLUSH) ) *cache = 0; }
+ while ( ((unsigned long)(++cache) & ~PAGE_MASK) != 0 );
+
+ local_flush_tlb();
+}
+
+
+void *map_domain_mem(unsigned long pa)
+{
+ unsigned long va;
+ int cpu = smp_processor_id();
+ unsigned int idx;
+ unsigned long *cache = mapcache[cpu];
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ for ( ; ; )
+ {
+ idx = map_idx[cpu] = (map_idx[cpu] + 1) & (MAPCACHE_ENTRIES - 1);
+ if ( idx == 0 ) flush_all_ready_maps();
+ if ( cache[idx] == 0 ) break;
+ }
+
+ cache[idx] = (pa & PAGE_MASK) | PAGE_HYPERVISOR;
+
+ local_irq_restore(flags);
+
+ va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT) + (pa & ~PAGE_MASK);
+ return (void *)va;
+}
+
+void unmap_domain_mem(void *va)
+{
+ unsigned int idx;
+ idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
+ mapcache[smp_processor_id()][idx] |= READY_FOR_TLB_FLUSH;
+}
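
The new domain_page.c gives each CPU a page-sized window of PTE slots (the mapcache). map_domain_mem() claims slots round-robin with interrupts disabled, so an interrupt handler on the same CPU cannot race for an index; unmap_domain_mem() merely tags the slot with the spare READY_FOR_TLB_FLUSH bit, and all tagged slots are reclaimed in one batch, with a single local_flush_tlb(), when the index wraps to zero. Unmapping is therefore almost free, at the cost of one TLB flush per MAPCACHE_ENTRIES mappings. A usage sketch (zero_frame() is hypothetical, not part of the patch):

    /* Sketch: scrub an arbitrary machine frame via the mapcache.
     * The mapping is per-CPU, so the caller must stay on this CPU
     * between the map and the unmap. */
    static void zero_frame(unsigned long pa)
    {
        void *va = map_domain_mem(pa & PAGE_MASK);
        memset(va, 0, PAGE_SIZE);
        unmap_domain_mem(va);
    }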
diff --git a/xen-2.4.16/common/memory.c b/xen-2.4.16/common/memory.c
index 836e27abac..2e6697a0b8 100644
--- a/xen-2.4.16/common/memory.c
+++ b/xen-2.4.16/common/memory.c
@@ -182,13 +182,17 @@
#define MEM_LOG(_f, _a...) ((void)0)
#endif
+/* Domain 0 is allowed to submit requests on behalf of others. */
+#define DOMAIN_OKAY(_f) \
+ ((((_f) & PG_domain_mask) == current->domain) || (current->domain == 0))
+
/* 'get' checks parameter for validity before inc'ing refcnt. */
static int get_l2_table(unsigned long page_nr);
static int get_l1_table(unsigned long page_nr);
static int get_page(unsigned long page_nr, int writeable);
static int inc_page_refcnt(unsigned long page_nr, unsigned int type);
/* 'put' does no checking because if refcnt not zero, entity must be valid. */
-static int put_l2_table(unsigned long page_nr);
+static void put_l2_table(unsigned long page_nr);
static void put_l1_table(unsigned long page_nr);
static void put_page(unsigned long page_nr, int writeable);
static int dec_page_refcnt(unsigned long page_nr, unsigned int type);
@@ -248,14 +252,14 @@ static int inc_page_refcnt(unsigned long page_nr, unsigned int type)
if ( page_nr >= max_page )
{
MEM_LOG("Page out of range (%08lx>%08lx)", page_nr, max_page);
- return(-1);
+ return -1;
}
page = frame_table + page_nr;
flags = page->flags;
- if ( (flags & PG_domain_mask) != current->domain )
+ if ( !DOMAIN_OKAY(flags) )
{
MEM_LOG("Bad page domain (%ld)", flags & PG_domain_mask);
- return(-1);
+ return -1;
}
if ( (flags & PG_type_mask) != type )
{
@@ -264,13 +268,13 @@ static int inc_page_refcnt(unsigned long page_nr, unsigned int type)
MEM_LOG("Page %08lx bad type/count (%08lx!=%08x) cnt=%ld",
page_nr << PAGE_SHIFT,
flags & PG_type_mask, type, page_type_count(page));
- return(-1);
+ return -1;
}
page->flags |= type;
}
get_page_tot(page);
- return(get_page_type(page));
+ return get_page_type(page);
}
/* Return new refcnt, or -1 on error. */
@@ -282,21 +286,46 @@ static int dec_page_refcnt(unsigned long page_nr, unsigned int type)
if ( page_nr >= max_page )
{
MEM_LOG("Page out of range (%08lx>%08lx)", page_nr, max_page);
- return(-1);
+ return -1;
}
page = frame_table + page_nr;
- if ( (page->flags & (PG_type_mask | PG_domain_mask)) !=
- (type | current->domain) )
+ if ( !DOMAIN_OKAY(page->flags) ||
+ ((page->flags & PG_type_mask) != type) )
{
MEM_LOG("Bad page type/domain (dom=%ld) (type %ld != expected %d)",
page->flags & PG_domain_mask, page->flags & PG_type_mask,
type);
- return(-1);
+ return -1;
}
ASSERT(page_type_count(page) != 0);
if ( (ret = put_page_type(page)) == 0 ) page->flags &= ~PG_type_mask;
put_page_tot(page);
- return(ret);
+ return ret;
+}
+
+
+/* We allow a L2 table to map itself, to achieve a linear pagetable. */
+/* NB. There's no need for a put_twisted_l2_table() function!! */
+static int get_twisted_l2_table(unsigned long entry_pfn, l2_pgentry_t l2e)
+{
+ unsigned long l2v = l2_pgentry_val(l2e);
+
+ /* Clearly the mapping must be read-only :-) */
+ if ( (l2v & _PAGE_RW) )
+ {
+ MEM_LOG("Attempt to install twisted L2 entry with write permissions");
+ return -1;
+ }
+
+ /* This is a sufficient final check. */
+ if ( (l2v >> PAGE_SHIFT) != entry_pfn )
+ {
+ MEM_LOG("L2 tables may not map _other_ L2 tables!\n");
+ return -1;
+ }
+
+ /* We don't bump the reference counts. */
+ return 0;
}
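
The "twisted" entry is a linear (self-referential) pagetable: a guest installs its L2 table's own frame into one of that table's slots, after which a fixed 4MB window of its virtual address space contains the PTEs of every page in that space. The two checks really are sufficient: the mapping must be read-only, or the guest could edit its own pagetables through the window, and it must point back at exactly the frame that contains it, so no reference counts need to move. For example, with a hypothetical L2 table living in pfn 0x1234:

    l2_pgentry_t ok  = mk_l2_pgentry((0x1234UL << PAGE_SHIFT) | _PAGE_PRESENT);
    l2_pgentry_t bad = mk_l2_pgentry((0x1234UL << PAGE_SHIFT) |
                                     _PAGE_PRESENT | _PAGE_RW);
    get_twisted_l2_table(0x1234, ok);  /* 0: read-only self-map accepted */
    get_twisted_l2_table(0x1234, bad); /* -1: writable self-map refused  */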
@@ -306,7 +335,7 @@ static int get_l2_table(unsigned long page_nr)
int i, ret=0;
ret = inc_page_refcnt(page_nr, PGT_l2_page_table);
- if ( ret != 0 ) return((ret < 0) ? ret : 0);
+ if ( ret != 0 ) return (ret < 0) ? ret : 0;
/* NEW level-2 page table! Deal with every PDE in the table. */
p_l2_entry = map_domain_mem(page_nr << PAGE_SHIFT);
@@ -318,12 +347,13 @@ static int get_l2_table(unsigned long page_nr)
{
MEM_LOG("Bad L2 page type settings %04lx",
l2_pgentry_val(l2_entry) & (_PAGE_GLOBAL|_PAGE_PSE));
- return(-1);
+ ret = -1;
+ goto out;
}
+ /* Assume we're mapping an L1 table, falling back to twisted L2. */
ret = get_l1_table(l2_pgentry_to_pagenr(l2_entry));
- if ( ret ) return(ret);
- p_l2_entry = map_domain_mem((page_nr << PAGE_SHIFT) +
- ((i+1) * sizeof(l2_pgentry_t)));
+ if ( ret ) ret = get_twisted_l2_table(page_nr, l2_entry);
+ if ( ret ) goto out;
}
/* Now we simply slap in our high mapping. */
@@ -334,7 +364,9 @@ static int get_l2_table(unsigned long page_nr)
DOMAIN_ENTRIES_PER_L2_PAGETABLE] =
mk_l2_pgentry(__pa(current->mm.perdomain_pt) | __PAGE_HYPERVISOR);
- return(ret);
+ out:
+ unmap_domain_mem(p_l2_entry);
+ return ret;
}
static int get_l1_table(unsigned long page_nr)
@@ -344,7 +376,7 @@ static int get_l1_table(unsigned long page_nr)
/* Update ref count for page pointed at by PDE. */
ret = inc_page_refcnt(page_nr, PGT_l1_page_table);
- if ( ret != 0 ) return((ret < 0) ? ret : 0);
+ if ( ret != 0 ) return (ret < 0) ? ret : 0;
/* NEW level-1 page table! Deal with every PTE in the table. */
p_l1_entry = map_domain_mem(page_nr << PAGE_SHIFT);
@@ -358,14 +390,18 @@ static int get_l1_table(unsigned long page_nr)
MEM_LOG("Bad L1 page type settings %04lx",
l1_pgentry_val(l1_entry) &
(_PAGE_GLOBAL|_PAGE_PAT));
- return(-1);
+ ret = -1;
+ goto out;
}
ret = get_page(l1_pgentry_to_pagenr(l1_entry),
l1_pgentry_val(l1_entry) & _PAGE_RW);
- if ( ret ) return(ret);
+ if ( ret ) goto out;
}
- return(ret);
+ out:
+ /* Make sure we unmap the right page! */
+ unmap_domain_mem(p_l1_entry-1);
+ return ret;
}
static int get_page(unsigned long page_nr, int writeable)
@@ -381,7 +417,7 @@ static int get_page(unsigned long page_nr, int writeable)
}
page = frame_table + page_nr;
flags = page->flags;
- if ( (flags & PG_domain_mask) != current->domain )
+ if ( !DOMAIN_OKAY(flags) )
{
MEM_LOG("Bad page domain (%ld)", flags & PG_domain_mask);
return(-1);
@@ -408,28 +444,23 @@ static int get_page(unsigned long page_nr, int writeable)
return(0);
}
-static int put_l2_table(unsigned long page_nr)
+static void put_l2_table(unsigned long page_nr)
{
l2_pgentry_t *p_l2_entry, l2_entry;
- int i, ret;
+ int i;
- ret = dec_page_refcnt(page_nr, PGT_l2_page_table);
- if ( ret != 0 ) return((ret < 0) ? ret : 0);
+ if ( dec_page_refcnt(page_nr, PGT_l2_page_table) ) return;
/* We had last reference to level-2 page table. Free the PDEs. */
p_l2_entry = map_domain_mem(page_nr << PAGE_SHIFT);
- for ( i = 0; i < HYPERVISOR_ENTRIES_PER_L2_PAGETABLE; i++ )
+ for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
{
l2_entry = *p_l2_entry++;
if ( (l2_pgentry_val(l2_entry) & _PAGE_PRESENT) )
- {
put_l1_table(l2_pgentry_to_pagenr(l2_entry));
- p_l2_entry = map_domain_mem((page_nr << PAGE_SHIFT) +
- ((i+1) * sizeof(l2_pgentry_t)));
- }
}
- return(0);
+ unmap_domain_mem(p_l2_entry);
}
static void put_l1_table(unsigned long page_nr)
@@ -437,7 +468,7 @@ static void put_l1_table(unsigned long page_nr)
l1_pgentry_t *p_l1_entry, l1_entry;
int i;
- if ( dec_page_refcnt(page_nr, PGT_l1_page_table) != 0 ) return;
+ if ( dec_page_refcnt(page_nr, PGT_l1_page_table) ) return;
/* We had last reference to level-1 page table. Free the PTEs. */
p_l1_entry = map_domain_mem(page_nr << PAGE_SHIFT);
@@ -450,6 +481,9 @@ static void put_l1_table(unsigned long page_nr)
l1_pgentry_val(l1_entry) & _PAGE_RW);
}
}
+
+ /* Make sure we unmap the right page! */
+ unmap_domain_mem(p_l1_entry-1);
}
static void put_page(unsigned long page_nr, int writeable)
@@ -457,7 +491,7 @@ static void put_page(unsigned long page_nr, int writeable)
struct pfn_info *page;
ASSERT(page_nr < max_page);
page = frame_table + page_nr;
- ASSERT((page->flags & PG_domain_mask) == current->domain);
+ ASSERT(DOMAIN_OKAY(page->flags));
ASSERT((!writeable) ||
((page_type_count(page) != 0) &&
((page->flags & PG_type_mask) == PGT_writeable_page)));
@@ -485,12 +519,6 @@ static int mod_l2_entry(unsigned long pa, l2_pgentry_t new_l2_entry)
goto fail;
}
- /*
- * Write the new value while pointer is still valid. The mapping cache
- * entry for p_l2_entry may get clobbered by {put,get}_l1_table.
- */
- *p_l2_entry = new_l2_entry;
-
if ( (l2_pgentry_val(new_l2_entry) & _PAGE_PRESENT) )
{
if ( (l2_pgentry_val(new_l2_entry) & (_PAGE_GLOBAL|_PAGE_PSE)) )
@@ -509,7 +537,9 @@ static int mod_l2_entry(unsigned long pa, l2_pgentry_t new_l2_entry)
put_l1_table(l2_pgentry_to_pagenr(old_l2_entry));
}
- if ( get_l1_table(l2_pgentry_to_pagenr(new_l2_entry)) )
+ /* Assume we're mapping an L1 table, falling back to twisted L2. */
+ if ( get_l1_table(l2_pgentry_to_pagenr(new_l2_entry)) &&
+ get_twisted_l2_table(pa >> PAGE_SHIFT, new_l2_entry) )
goto fail;
}
}
@@ -518,16 +548,13 @@ static int mod_l2_entry(unsigned long pa, l2_pgentry_t new_l2_entry)
put_l1_table(l2_pgentry_to_pagenr(old_l2_entry));
}
- return(0);
+ *p_l2_entry = new_l2_entry;
+ unmap_domain_mem(p_l2_entry);
+ return 0;
fail:
- /*
- * On failure we put the old value back. We need to regrab the
- * mapping of the physical page frame.
- */
- p_l2_entry = map_domain_mem(pa);
- *p_l2_entry = old_l2_entry;
- return(-1);
+ unmap_domain_mem(p_l2_entry);
+ return -1;
}
@@ -572,12 +599,13 @@ static int mod_l1_entry(unsigned long pa, l1_pgentry_t new_l1_entry)
l1_pgentry_val(old_l1_entry) & _PAGE_RW);
}
- /* p_l1_entry is still valid here */
*p_l1_entry = new_l1_entry;
+ unmap_domain_mem(p_l1_entry);
+ return 0;
- return(0);
fail:
- return(-1);
+ unmap_domain_mem(p_l1_entry);
+ return -1;
}
@@ -615,7 +643,7 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
break;
case PGEXT_UNPIN_TABLE:
- if ( (page->flags & PG_domain_mask) != current->domain )
+ if ( !DOMAIN_OKAY(page->flags) )
{
err = 1;
MEM_LOG("Page %08lx bad domain (dom=%ld)",
@@ -701,7 +729,7 @@ int do_process_page_updates(page_update_request_t *updates, int count)
case PGREQ_NORMAL:
page = frame_table + pfn;
flags = page->flags;
- if ( (flags & PG_domain_mask) == current->domain )
+ if ( DOMAIN_OKAY(flags) )
{
switch ( (flags & PG_type_mask) )
{
@@ -731,8 +759,9 @@ int do_process_page_updates(page_update_request_t *updates, int count)
flags = page->flags;
if ( (flags | current->domain) == PGT_l1_page_table )
{
-
- *(unsigned long *)map_domain_mem(cur.ptr) = cur.val;
+ unsigned long *va = map_domain_mem(cur.ptr);
+ *va = cur.val;
+ unmap_domain_mem(va);
err = 0;
}
else
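
Two themes run through the memory.c changes. First, DOMAIN_OKAY() relaxes every ownership check so that domain 0 may submit updates on behalf of other domains, which the old `== current->domain` tests forbade. Second, because each map_domain_mem() now takes its own mapcache slot, a nested map no longer clobbers an earlier one: mod_l2_entry() can hold p_l2_entry across get_l1_table()/put_l1_table(), write the new entry exactly once on success, and drop the old "regrab the mapping and restore the old value" failure path. The resulting control flow, heavily condensed as a sketch:

    /* Sketch of the new mod_l2_entry() shape. validate() stands in
     * for the get/put reference-count logic, which may itself call
     * map_domain_mem() without invalidating p_l2_entry. */
    p_l2_entry = map_domain_mem(pa);
    old_l2_entry = *p_l2_entry;
    if ( validate(new_l2_entry, old_l2_entry) != 0 )
        goto fail;
    *p_l2_entry = new_l2_entry;      /* single write on success */
    unmap_domain_mem(p_l2_entry);
    return 0;
 fail:
    unmap_domain_mem(p_l2_entry);
    return -1;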
diff --git a/xen-2.4.16/include/asm-i386/domain_page.h b/xen-2.4.16/include/asm-i386/domain_page.h
index a8e4c71013..bae558c377 100644
--- a/xen-2.4.16/include/asm-i386/domain_page.h
+++ b/xen-2.4.16/include/asm-i386/domain_page.h
@@ -9,6 +9,21 @@
extern unsigned long *mapcache[NR_CPUS];
#define MAPCACHE_ENTRIES 1024
+
+/*
+ * Maps a given physical address, returning corresponding virtual address.
+ * The entire page containing that VA is now accessible until a
+ * corresponding call to unmap_domain_mem().
+ */
+extern void *map_domain_mem(unsigned long pa);
+
+/*
+ * Pass a VA within a page previously mapped with map_domain_mem().
+ * That page will then be removed from the mapping lists.
+ */
+extern void unmap_domain_mem(void *va);
+
+#if 0
#define MAPCACHE_HASH(_pfn) ((_pfn) & (MAPCACHE_ENTRIES-1))
static inline void *map_domain_mem(unsigned long pa)
{
@@ -25,3 +40,4 @@ static inline void *map_domain_mem(unsigned long pa)
}
return va;
}
+#endif
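
The header keeps the old inline, hash-indexed map_domain_mem() under #if 0 for reference; the out-of-line version in domain_page.c replaces it. Note that unmap_domain_mem() accepts any VA inside the mapped page, not just the value map_domain_mem() returned, because the slot index is recoverable from the VA alone:

    /* The address arithmetic the two functions agree on:
     *   va  = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT) + (pa & ~PAGE_MASK)
     *   idx = (va - MAPCACHE_VIRT_START) >> PAGE_SHIFT
     * Any offset within the page shifts away, which is what lets the
     * domain.c callers pass 'l1tab-1' safely. */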