author    Jan Beulich <jbeulich@suse.com>    2013-01-23 14:11:39 +0100
committer Jan Beulich <jbeulich@suse.com>    2013-01-23 14:11:39 +0100
commit    bad20150553601964143dff43d932accceba3e5e (patch)
tree      7185f1c557326846a774513a38ae276d9e3e279a
parent    b52f69ab85a35d6140ca5e8ec74240fb1e0a5e7e (diff)
x86: properly use map_domain_page() during page table manipulation
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
 xen/arch/x86/debug.c               | 15
 xen/arch/x86/mm.c                  | 31
 xen/arch/x86/traps.c               |  5
 xen/arch/x86/x86_64/compat/traps.c |  5
 xen/arch/x86/x86_64/mm.c           | 63
 xen/arch/x86/x86_64/traps.c        | 12
 xen/include/asm-x86/page.h         |  4
 7 files changed, 82 insertions(+), 53 deletions(-)
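
The change follows one recurring pattern: wherever a page-table page used to be reached through the permanent 1:1 mapping (mfn_to_virt()/page_to_virt()), it is now mapped transiently with map_domain_page(), the single entry of interest is copied out, and the mapping is dropped again with unmap_domain_page() before the entry is acted upon. A minimal sketch of that pattern (illustrative only; the helper name read_l4e_at() is not part of the patch):

    static l4_pgentry_t read_l4e_at(unsigned long mfn, unsigned long vaddr)
    {
        /* Transiently map the L4 page-table page instead of using mfn_to_virt(). */
        l4_pgentry_t *l4t = map_domain_page(mfn);
        /* Copy out the one entry we need ... */
        l4_pgentry_t l4e = l4t[l4_table_offset(vaddr)];

        /* ... and drop the mapping before working on the (copied) entry. */
        unmap_domain_page(l4t);
        return l4e;
    }
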
diff --git a/xen/arch/x86/debug.c b/xen/arch/x86/debug.c
index f7469daab6..e67473e0a0 100644
--- a/xen/arch/x86/debug.c
+++ b/xen/arch/x86/debug.c
@@ -98,8 +98,9 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
if ( pgd3val == 0 )
{
- l4t = mfn_to_virt(mfn);
+ l4t = map_domain_page(mfn);
l4e = l4t[l4_table_offset(vaddr)];
+ unmap_domain_page(l4t);
mfn = l4e_get_pfn(l4e);
DBGP2("l4t:%p l4to:%lx l4e:%lx mfn:%lx\n", l4t,
l4_table_offset(vaddr), l4e, mfn);
@@ -109,20 +110,23 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
return INVALID_MFN;
}
- l3t = mfn_to_virt(mfn);
+ l3t = map_domain_page(mfn);
l3e = l3t[l3_table_offset(vaddr)];
+ unmap_domain_page(l3t);
mfn = l3e_get_pfn(l3e);
DBGP2("l3t:%p l3to:%lx l3e:%lx mfn:%lx\n", l3t,
l3_table_offset(vaddr), l3e, mfn);
- if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
+ if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ||
+ (l3e_get_flags(l3e) & _PAGE_PSE) )
{
DBGP1("l3 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
return INVALID_MFN;
}
}
- l2t = mfn_to_virt(mfn);
+ l2t = map_domain_page(mfn);
l2e = l2t[l2_table_offset(vaddr)];
+ unmap_domain_page(l2t);
mfn = l2e_get_pfn(l2e);
DBGP2("l2t:%p l2to:%lx l2e:%lx mfn:%lx\n", l2t, l2_table_offset(vaddr),
l2e, mfn);
@@ -132,8 +136,9 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
DBGP1("l2 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
return INVALID_MFN;
}
- l1t = mfn_to_virt(mfn);
+ l1t = map_domain_page(mfn);
l1e = l1t[l1_table_offset(vaddr)];
+ unmap_domain_page(l1t);
mfn = l1e_get_pfn(l1e);
DBGP2("l1t:%p l1to:%lx l1e:%lx mfn:%lx\n", l1t, l1_table_offset(vaddr),
l1e, mfn);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index c270c1cd29..6706f1d3cc 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1331,7 +1331,7 @@ static int alloc_l4_table(struct page_info *page, int preemptible)
{
struct domain *d = page_get_owner(page);
unsigned long pfn = page_to_mfn(page);
- l4_pgentry_t *pl4e = page_to_virt(page);
+ l4_pgentry_t *pl4e = map_domain_page(pfn);
unsigned int i;
int rc = 0, partial = page->partial_pte;
@@ -1365,12 +1365,16 @@ static int alloc_l4_table(struct page_info *page, int preemptible)
put_page_from_l4e(pl4e[i], pfn, 0, 0);
}
if ( rc < 0 )
+ {
+ unmap_domain_page(pl4e);
return rc;
+ }
adjust_guest_l4e(pl4e[i], d);
}
init_guest_l4_table(pl4e, d);
+ unmap_domain_page(pl4e);
return rc > 0 ? 0 : rc;
}
@@ -1464,7 +1468,7 @@ static int free_l4_table(struct page_info *page, int preemptible)
{
struct domain *d = page_get_owner(page);
unsigned long pfn = page_to_mfn(page);
- l4_pgentry_t *pl4e = page_to_virt(page);
+ l4_pgentry_t *pl4e = map_domain_page(pfn);
int rc = 0, partial = page->partial_pte;
unsigned int i = page->nr_validated_ptes - !partial;
@@ -1487,6 +1491,9 @@ static int free_l4_table(struct page_info *page, int preemptible)
page->partial_pte = 0;
rc = -EAGAIN;
}
+
+ unmap_domain_page(pl4e);
+
return rc > 0 ? 0 : rc;
}
@@ -4983,15 +4990,23 @@ int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
return rc != X86EMUL_UNHANDLEABLE ? EXCRET_fault_fixed : 0;
}
-void free_xen_pagetable(void *v)
+void *alloc_xen_pagetable(void)
{
- if ( system_state == SYS_STATE_early_boot )
- return;
+ if ( system_state != SYS_STATE_early_boot )
+ {
+ void *ptr = alloc_xenheap_page();
- if ( is_xen_heap_page(virt_to_page(v)) )
+ BUG_ON(!dom0 && !ptr);
+ return ptr;
+ }
+
+ return mfn_to_virt(alloc_boot_pages(1, 1));
+}
+
+void free_xen_pagetable(void *v)
+{
+ if ( system_state != SYS_STATE_early_boot )
free_xenheap_page(v);
- else
- free_domheap_page(virt_to_page(v));
}
/* Convert to from superpage-mapping flags for map_pages_to_xen(). */
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index a97033d1a6..24ca6e25e8 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -180,6 +180,11 @@ static void show_guest_stack(struct vcpu *v, struct cpu_user_regs *regs)
printk(" %p", _p(addr));
stack++;
}
+ if ( mask == PAGE_SIZE )
+ {
+ BUILD_BUG_ON(PAGE_SIZE == STACK_SIZE);
+ unmap_domain_page(stack);
+ }
if ( i == 0 )
printk("Stack empty.");
printk("\n");
diff --git a/xen/arch/x86/x86_64/compat/traps.c b/xen/arch/x86/x86_64/compat/traps.c
index 7add636a10..2b67e351ea 100644
--- a/xen/arch/x86/x86_64/compat/traps.c
+++ b/xen/arch/x86/x86_64/compat/traps.c
@@ -56,6 +56,11 @@ void compat_show_guest_stack(struct vcpu *v, struct cpu_user_regs *regs,
printk(" %08x", addr);
stack++;
}
+ if ( mask == PAGE_SIZE )
+ {
+ BUILD_BUG_ON(PAGE_SIZE == STACK_SIZE);
+ unmap_domain_page(stack);
+ }
if ( i == 0 )
printk("Stack empty.");
printk("\n");
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 89897e3581..bdf042de9e 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -65,22 +65,6 @@ int __mfn_valid(unsigned long mfn)
pdx_group_valid));
}
-void *alloc_xen_pagetable(void)
-{
- unsigned long mfn;
-
- if ( system_state != SYS_STATE_early_boot )
- {
- struct page_info *pg = alloc_domheap_page(NULL, 0);
-
- BUG_ON(!dom0 && !pg);
- return pg ? page_to_virt(pg) : NULL;
- }
-
- mfn = alloc_boot_pages(1, 1);
- return mfn_to_virt(mfn);
-}
-
l3_pgentry_t *virt_to_xen_l3e(unsigned long v)
{
l4_pgentry_t *pl4e;
@@ -154,35 +138,45 @@ void *do_page_walk(struct vcpu *v, unsigned long addr)
if ( is_hvm_vcpu(v) )
return NULL;
- l4t = mfn_to_virt(mfn);
+ l4t = map_domain_page(mfn);
l4e = l4t[l4_table_offset(addr)];
- mfn = l4e_get_pfn(l4e);
+ unmap_domain_page(l4t);
if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
return NULL;
- l3t = mfn_to_virt(mfn);
+ l3t = map_l3t_from_l4e(l4e);
l3e = l3t[l3_table_offset(addr)];
+ unmap_domain_page(l3t);
mfn = l3e_get_pfn(l3e);
if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
return NULL;
if ( (l3e_get_flags(l3e) & _PAGE_PSE) )
- return mfn_to_virt(mfn) + (addr & ((1UL << L3_PAGETABLE_SHIFT) - 1));
+ {
+ mfn += PFN_DOWN(addr & ((1UL << L3_PAGETABLE_SHIFT) - 1));
+ goto ret;
+ }
- l2t = mfn_to_virt(mfn);
+ l2t = map_domain_page(mfn);
l2e = l2t[l2_table_offset(addr)];
+ unmap_domain_page(l2t);
mfn = l2e_get_pfn(l2e);
if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
return NULL;
if ( (l2e_get_flags(l2e) & _PAGE_PSE) )
- return mfn_to_virt(mfn) + (addr & ((1UL << L2_PAGETABLE_SHIFT) - 1));
+ {
+ mfn += PFN_DOWN(addr & ((1UL << L2_PAGETABLE_SHIFT) - 1));
+ goto ret;
+ }
- l1t = mfn_to_virt(mfn);
+ l1t = map_domain_page(mfn);
l1e = l1t[l1_table_offset(addr)];
+ unmap_domain_page(l1t);
mfn = l1e_get_pfn(l1e);
if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
return NULL;
- return mfn_to_virt(mfn) + (addr & ~PAGE_MASK);
+ ret:
+ return map_domain_page(mfn) + (addr & ~PAGE_MASK);
}
void __init pfn_pdx_hole_setup(unsigned long mask)
@@ -519,10 +513,9 @@ static int setup_compat_m2p_table(struct mem_hotadd_info *info)
static int setup_m2p_table(struct mem_hotadd_info *info)
{
unsigned long i, va, smap, emap;
- unsigned int n, memflags;
+ unsigned int n;
l2_pgentry_t *l2_ro_mpt = NULL;
l3_pgentry_t *l3_ro_mpt = NULL;
- struct page_info *l2_pg;
int ret = 0;
ASSERT(l4e_get_flags(idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)])
@@ -560,7 +553,6 @@ static int setup_m2p_table(struct mem_hotadd_info *info)
}
va = RO_MPT_VIRT_START + i * sizeof(*machine_to_phys_mapping);
- memflags = MEMF_node(phys_to_nid(i << PAGE_SHIFT));
for ( n = 0; n < CNT; ++n)
if ( mfn_valid(i + n * PDX_GROUP_COUNT) )
@@ -587,19 +579,18 @@ static int setup_m2p_table(struct mem_hotadd_info *info)
l2_table_offset(va);
else
{
- l2_pg = alloc_domheap_page(NULL, memflags);
-
- if (!l2_pg)
+ l2_ro_mpt = alloc_xen_pagetable();
+ if ( !l2_ro_mpt )
{
ret = -ENOMEM;
goto error;
}
- l2_ro_mpt = page_to_virt(l2_pg);
clear_page(l2_ro_mpt);
l3e_write(&l3_ro_mpt[l3_table_offset(va)],
- l3e_from_page(l2_pg, __PAGE_HYPERVISOR | _PAGE_USER));
- l2_ro_mpt += l2_table_offset(va);
+ l3e_from_paddr(__pa(l2_ro_mpt),
+ __PAGE_HYPERVISOR | _PAGE_USER));
+ l2_ro_mpt += l2_table_offset(va);
}
/* NB. Cannot be GLOBAL as shadow_mode_translate reuses this area. */
@@ -762,12 +753,12 @@ void __init paging_init(void)
l4_table_offset(HIRO_COMPAT_MPT_VIRT_START));
l3_ro_mpt = l4e_to_l3e(idle_pg_table[l4_table_offset(
HIRO_COMPAT_MPT_VIRT_START)]);
- if ( (l2_pg = alloc_domheap_page(NULL, 0)) == NULL )
+ if ( (l2_ro_mpt = alloc_xen_pagetable()) == NULL )
goto nomem;
- compat_idle_pg_table_l2 = l2_ro_mpt = page_to_virt(l2_pg);
+ compat_idle_pg_table_l2 = l2_ro_mpt;
clear_page(l2_ro_mpt);
l3e_write(&l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
- l3e_from_page(l2_pg, __PAGE_HYPERVISOR));
+ l3e_from_paddr(__pa(l2_ro_mpt), __PAGE_HYPERVISOR));
l2_ro_mpt += l2_table_offset(HIRO_COMPAT_MPT_VIRT_START);
/* Allocate and map the compatibility mode machine-to-phys table. */
mpt_size = (mpt_size >> 1) + (1UL << (L2_PAGETABLE_SHIFT - 1));
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index 43a6f2ef76..8a1d912110 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -175,8 +175,9 @@ void show_page_walk(unsigned long addr)
printk("Pagetable walk from %016lx:\n", addr);
- l4t = mfn_to_virt(mfn);
+ l4t = map_domain_page(mfn);
l4e = l4t[l4_table_offset(addr)];
+ unmap_domain_page(l4t);
mfn = l4e_get_pfn(l4e);
pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
@@ -186,8 +187,9 @@ void show_page_walk(unsigned long addr)
!mfn_valid(mfn) )
return;
- l3t = mfn_to_virt(mfn);
+ l3t = map_domain_page(mfn);
l3e = l3t[l3_table_offset(addr)];
+ unmap_domain_page(l3t);
mfn = l3e_get_pfn(l3e);
pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
@@ -199,8 +201,9 @@ void show_page_walk(unsigned long addr)
!mfn_valid(mfn) )
return;
- l2t = mfn_to_virt(mfn);
+ l2t = map_domain_page(mfn);
l2e = l2t[l2_table_offset(addr)];
+ unmap_domain_page(l2t);
mfn = l2e_get_pfn(l2e);
pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
@@ -212,8 +215,9 @@ void show_page_walk(unsigned long addr)
!mfn_valid(mfn) )
return;
- l1t = mfn_to_virt(mfn);
+ l1t = map_domain_page(mfn);
l1e = l1t[l1_table_offset(addr)];
+ unmap_domain_page(l1t);
mfn = l1e_get_pfn(l1e);
pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index 37b998b20e..4c3f4d729d 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -172,6 +172,10 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
#define l3e_to_l2e(x) ((l2_pgentry_t *)__va(l3e_get_paddr(x)))
#define l4e_to_l3e(x) ((l3_pgentry_t *)__va(l4e_get_paddr(x)))
+#define map_l1t_from_l2e(x) ((l1_pgentry_t *)map_domain_page(l2e_get_pfn(x)))
+#define map_l2t_from_l3e(x) ((l2_pgentry_t *)map_domain_page(l3e_get_pfn(x)))
+#define map_l3t_from_l4e(x) ((l3_pgentry_t *)map_domain_page(l4e_get_pfn(x)))
+
/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(a) \
(((a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
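
The three map_l*t_from_l*e() helpers added above bundle the MFN extraction and the transient mapping into one step: map_l3t_from_l4e(x), for instance, is simply map_domain_page(l4e_get_pfn(x)) with the result typed as l3_pgentry_t *. A minimal usage sketch (illustrative only; the wrapper read_l3e_of() is not part of the patch), mirroring what do_page_walk() now does, with unmapping still left to the caller:

    static l3_pgentry_t read_l3e_of(l4_pgentry_t l4e, unsigned long addr)
    {
        /* Equivalent to map_domain_page(l4e_get_pfn(l4e)), but self-documenting. */
        l3_pgentry_t *l3t = map_l3t_from_l4e(l4e);
        l3_pgentry_t l3e = l3t[l3_table_offset(addr)];

        /* The macro only maps; the caller must still unmap. */
        unmap_domain_page(l3t);
        return l3e;
    }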