about summary refs log tree commit diff stats
path: root/xen/arch/x86/mm
diff options
context:
space:
mode:
authorTim Deegan <tim@xen.org>2012-09-13 16:41:33 +0100
committerTim Deegan <tim@xen.org>2012-09-13 16:41:33 +0100
commitc622876000c5fdded9d546b93bf9cbbca9cdb081 (patch)
treea887c8d8c97af36cbc049e785f7ea2cdba80236c /xen/arch/x86/mm
parentf28cf4d6ce350ff364d1e9e2cce0e93025a5e372 (diff)
downloadxen-c622876000c5fdded9d546b93bf9cbbca9cdb081.tar.gz
xen-c622876000c5fdded9d546b93bf9cbbca9cdb081.tar.bz2
xen-c622876000c5fdded9d546b93bf9cbbca9cdb081.zip
x86/mm: remove the linear mapping of the p2m tables.
Mapping the p2m into the monitor tables was an important optimization on 32-bit builds, where it avoided mapping and unmapping p2m pages during a walk. On 64-bit it makes no difference -- see http://old-list-archives.xen.org/archives/html/xen-devel/2010-04/msg00981.html Get rid of it, and use the explicit walk for all lookups. Signed-off-by: Tim Deegan <tim@xen.org> Committed-by: Tim Deegan <tim@xen.org>
Diffstat (limited to 'xen/arch/x86/mm')
-rw-r--r--xen/arch/x86/mm/hap/hap.c5
-rw-r--r--xen/arch/x86/mm/p2m-pt.c191
-rw-r--r--xen/arch/x86/mm/shadow/multi.c8
3 files changed, 0 insertions, 204 deletions
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index ec6653a4a2..d2637d3b66 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -405,11 +405,6 @@ static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)
l4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
l4e_from_pfn(mfn_x(l4mfn), __PAGE_HYPERVISOR);
- /* Install the domain-specific P2M table */
- l4e[l4_table_offset(RO_MPT_VIRT_START)] =
- l4e_from_pfn(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))),
- __PAGE_HYPERVISOR);
-
hap_unmap_domain_page(l4e);
}
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index b493fceac2..bf432c2794 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -460,186 +460,6 @@ out:
return rv;
}
-
-/* Read the current domain's p2m table (through the linear mapping). */
-static mfn_t p2m_gfn_to_mfn_current(struct p2m_domain *p2m,
- unsigned long gfn, p2m_type_t *t,
- p2m_access_t *a, p2m_query_t q,
- unsigned int *page_order)
-{
- mfn_t mfn = _mfn(INVALID_MFN);
- p2m_type_t p2mt = p2m_mmio_dm;
- paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
- /* XXX This is for compatibility with the old model, where anything not
- * XXX marked as RAM was considered to be emulated MMIO space.
- * XXX Once we start explicitly registering MMIO regions in the p2m
- * XXX we will return p2m_invalid for unmapped gfns */
-
- l1_pgentry_t l1e = l1e_empty(), *p2m_entry;
- l2_pgentry_t l2e = l2e_empty();
- l3_pgentry_t l3e = l3e_empty();
- int ret;
-
- ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START)
- / sizeof(l1_pgentry_t));
-
- /*
- * Read & process L3
- */
- p2m_entry = (l1_pgentry_t *)
- &__linear_l2_table[l2_linear_offset(RO_MPT_VIRT_START)
- + l3_linear_offset(addr)];
-pod_retry_l3:
- ret = __copy_from_user(&l3e, p2m_entry, sizeof(l3e));
-
- if ( ret != 0 || !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
- {
- if ( (l3e_get_flags(l3e) & _PAGE_PSE) &&
- (p2m_flags_to_type(l3e_get_flags(l3e)) == p2m_populate_on_demand) )
- {
- /* The read has succeeded, so we know that mapping exists */
- if ( q & P2M_ALLOC )
- {
- if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
- goto pod_retry_l3;
- p2mt = p2m_invalid;
- gdprintk(XENLOG_ERR, "%s: Allocate 1GB failed!\n", __func__);
- goto out;
- }
- else
- {
- p2mt = p2m_populate_on_demand;
- goto out;
- }
- }
- goto pod_retry_l2;
- }
-
- if ( l3e_get_flags(l3e) & _PAGE_PSE )
- {
- p2mt = p2m_flags_to_type(l3e_get_flags(l3e));
- ASSERT(l3e_get_pfn(l3e) != INVALID_MFN || !p2m_is_ram(p2mt));
- if (p2m_is_valid(p2mt) )
- mfn = _mfn(l3e_get_pfn(l3e) +
- l2_table_offset(addr) * L1_PAGETABLE_ENTRIES +
- l1_table_offset(addr));
- else
- p2mt = p2m_mmio_dm;
-
- if ( page_order )
- *page_order = PAGE_ORDER_1G;
- goto out;
- }
-
- /*
- * Read & process L2
- */
- p2m_entry = &__linear_l1_table[l1_linear_offset(RO_MPT_VIRT_START)
- + l2_linear_offset(addr)];
-
-pod_retry_l2:
- ret = __copy_from_user(&l2e,
- p2m_entry,
- sizeof(l2e));
- if ( ret != 0
- || !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
- {
- if( (l2e_get_flags(l2e) & _PAGE_PSE)
- && ( p2m_flags_to_type(l2e_get_flags(l2e))
- == p2m_populate_on_demand ) )
- {
- /* The read has succeeded, so we know that the mapping
- * exits at this point. */
- if ( q & P2M_ALLOC )
- {
- if ( !p2m_pod_demand_populate(p2m, gfn,
- PAGE_ORDER_2M, q) )
- goto pod_retry_l2;
-
- /* Allocate failed. */
- p2mt = p2m_invalid;
- printk("%s: Allocate failed!\n", __func__);
- goto out;
- }
- else
- {
- p2mt = p2m_populate_on_demand;
- goto out;
- }
- }
-
- goto pod_retry_l1;
- }
-
- if (l2e_get_flags(l2e) & _PAGE_PSE)
- {
- p2mt = p2m_flags_to_type(l2e_get_flags(l2e));
- ASSERT(l2e_get_pfn(l2e) != INVALID_MFN || !p2m_is_ram(p2mt));
-
- if ( p2m_is_valid(p2mt) )
- mfn = _mfn(l2e_get_pfn(l2e) + l1_table_offset(addr));
- else
- p2mt = p2m_mmio_dm;
-
- if ( page_order )
- *page_order = PAGE_ORDER_2M;
- goto out;
- }
-
- /*
- * Read and process L1
- */
-
- /* Need to __copy_from_user because the p2m is sparse and this
- * part might not exist */
-pod_retry_l1:
- p2m_entry = &phys_to_machine_mapping[gfn];
-
- ret = __copy_from_user(&l1e,
- p2m_entry,
- sizeof(l1e));
-
- if ( ret == 0 ) {
- unsigned long l1e_mfn = l1e_get_pfn(l1e);
- p2mt = p2m_flags_to_type(l1e_get_flags(l1e));
- ASSERT( mfn_valid(_mfn(l1e_mfn)) || !p2m_is_ram(p2mt) ||
- p2m_is_paging(p2mt) );
-
- if ( p2mt == p2m_populate_on_demand )
- {
- /* The read has succeeded, so we know that the mapping
- * exits at this point. */
- if ( q & P2M_ALLOC )
- {
- if ( !p2m_pod_demand_populate(p2m, gfn,
- PAGE_ORDER_4K, q) )
- goto pod_retry_l1;
-
- /* Allocate failed. */
- p2mt = p2m_invalid;
- goto out;
- }
- else
- {
- p2mt = p2m_populate_on_demand;
- goto out;
- }
- }
-
- if ( p2m_is_valid(p2mt) || p2m_is_grant(p2mt) )
- mfn = _mfn(l1e_mfn);
- else
- /* XXX see above */
- p2mt = p2m_mmio_dm;
- }
-
- if ( page_order )
- *page_order = PAGE_ORDER_4K;
-out:
- *t = p2mt;
- return mfn;
-}
-
static mfn_t
p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn,
p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
@@ -666,10 +486,6 @@ p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn,
/* This pfn is higher than the highest the p2m map currently holds */
return _mfn(INVALID_MFN);
- /* Use the fast path with the linear mapping if we can */
- if ( p2m == p2m_get_hostp2m(current->domain) )
- return p2m_gfn_to_mfn_current(p2m, gfn, t, a, q, page_order);
-
mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
{
@@ -904,17 +720,10 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
{
unsigned long entry_count = 0, pmbad = 0;
unsigned long mfn, gfn, m2pfn;
- int test_linear;
- struct domain *d = p2m->domain;
ASSERT(p2m_locked_by_me(p2m));
ASSERT(pod_locked_by_me(p2m));
- test_linear = ( (d == current->domain)
- && !pagetable_is_null(current->arch.monitor_table) );
- if ( test_linear )
- flush_tlb_local();
-
/* Audit part one: walk the domain's p2m table, checking the entries. */
if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
{
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index e7667e74e7..f5a8965656 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1472,14 +1472,6 @@ void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn)
shadow_l4e_from_mfn(gl4mfn, __PAGE_HYPERVISOR);
}
- if ( shadow_mode_translate(v->domain) )
- {
- /* install domain-specific P2M table */
- sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
- shadow_l4e_from_mfn(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))),
- __PAGE_HYPERVISOR);
- }
-
sh_unmap_domain_page(sl4e);
}
#endif