author     Keir Fraser <keir@xen.org>    2012-09-12 13:59:26 +0100
committer  Keir Fraser <keir@xen.org>    2012-09-12 13:59:26 +0100
commit     218adf199e6868eb4a15644a63e6991ede1613bc (patch)
tree       faa5070fb10dee2354b8c753385e10fc32668a2d /xen/arch/x86/mm
parent     5d1181a5ea5e0f11d481a94b16ed00d883f9726e (diff)

x86: We can assume CONFIG_PAGING_LEVELS==4.
Signed-off-by: Keir Fraser <keir@xen.org>
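
Now that the 32-bit (x86_32) hypervisor build is gone, x86 Xen is always compiled with 4-level paging, so CONFIG_PAGING_LEVELS is a constant 4 and every CONFIG_PAGING_LEVELS == 3 (PAE) branch touched below is dead code. The patch keeps the 4-level paths unconditionally and deletes the rest. A minimal, hypothetical C sketch (not Xen code) of the build-time collapse:

/* Hypothetical, standalone illustration of the collapse this patch performs:
 * the 3-level (PAE) branch can never be selected any more, so only the
 * 4-level branch survives. */
#include <stdio.h>

#define CONFIG_PAGING_LEVELS 4          /* the only value x86 Xen builds with */

static unsigned int paging_levels(void)
{
#if CONFIG_PAGING_LEVELS == 4
    return 4;                           /* always taken on 64-bit x86 */
#else
#error "3-level (PAE) hypervisor builds no longer exist"
#endif
}

int main(void)
{
    printf("paging levels: %u\n", paging_levels());
    return 0;
}
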
Diffstat (limited to 'xen/arch/x86/mm')
-rw-r--r--  xen/arch/x86/mm/hap/hap.c         175
-rw-r--r--  xen/arch/x86/mm/p2m-pod.c           4
-rw-r--r--  xen/arch/x86/mm/p2m-pt.c           87
-rw-r--r--  xen/arch/x86/mm/p2m.c              13
-rw-r--r--  xen/arch/x86/mm/shadow/common.c   109
-rw-r--r--  xen/arch/x86/mm/shadow/multi.c    262
-rw-r--r--  xen/arch/x86/mm/shadow/private.h    3
7 files changed, 25 insertions, 628 deletions
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 13b4be211a..ec6653a4a2 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -280,26 +280,6 @@ static struct page_info *hap_alloc_p2m_page(struct domain *d)
paging_lock_recursive(d);
pg = hap_alloc(d);
-#if CONFIG_PAGING_LEVELS == 3
- /* Under PAE mode, top-level P2M table should be allocated below 4GB space
- * because the size of h_cr3 is only 32-bit. We use alloc_domheap_pages to
- * force this requirement, and exchange the guaranteed 32-bit-clean
- * page for the one we just hap_alloc()ed. */
- if ( d->arch.paging.hap.p2m_pages == 0
- && mfn_x(page_to_mfn(pg)) >= (1UL << (32 - PAGE_SHIFT)) )
- {
- free_domheap_page(pg);
- pg = alloc_domheap_page(
- NULL, MEMF_bits(32) | MEMF_node(domain_to_node(d)));
- if ( likely(pg != NULL) )
- {
- void *p = __map_domain_page(pg);
- clear_page(p);
- hap_unmap_domain_page(p);
- }
- }
-#endif
-
if ( likely(pg != NULL) )
{
d->arch.paging.hap.total_pages--;
@@ -403,7 +383,6 @@ hap_set_allocation(struct domain *d, unsigned int pages, int *preempted)
return 0;
}
-#if CONFIG_PAGING_LEVELS == 4
static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)
{
struct domain *d = v->domain;
@@ -433,103 +412,20 @@ static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)
hap_unmap_domain_page(l4e);
}
-#endif /* CONFIG_PAGING_LEVELS == 4 */
-
-#if CONFIG_PAGING_LEVELS == 3
-static void hap_install_xen_entries_in_l2h(struct vcpu *v, mfn_t l2hmfn)
-{
- struct domain *d = v->domain;
- struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
- l2_pgentry_t *l2e;
- l3_pgentry_t *p2m;
- int i;
-
- l2e = hap_map_domain_page(l2hmfn);
- ASSERT(l2e != NULL);
-
- /* Copy the common Xen mappings from the idle domain */
- memcpy(&l2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
- &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
- L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
-
- /* Install the per-domain mappings for this domain */
- for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
- l2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
- l2e_from_pfn(
- mfn_x(page_to_mfn(perdomain_pt_page(d, i))),
- __PAGE_HYPERVISOR);
-
- /* No linear mapping; will be set up by monitor-table contructor. */
- for ( i = 0; i < 4; i++ )
- l2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
- l2e_empty();
-
- /* Install the domain-specific p2m table */
- ASSERT(pagetable_get_pfn(p2m_get_pagetable(hostp2m)) != 0);
- p2m = hap_map_domain_page(pagetable_get_mfn(p2m_get_pagetable(hostp2m)));
- for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
- {
- l2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
- (l3e_get_flags(p2m[i]) & _PAGE_PRESENT)
- ? l2e_from_pfn(mfn_x(_mfn(l3e_get_pfn(p2m[i]))),
- __PAGE_HYPERVISOR)
- : l2e_empty();
- }
- hap_unmap_domain_page(p2m);
- hap_unmap_domain_page(l2e);
-}
-#endif
static mfn_t hap_make_monitor_table(struct vcpu *v)
{
struct domain *d = v->domain;
struct page_info *pg;
+ mfn_t m4mfn;
ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
-#if CONFIG_PAGING_LEVELS == 4
- {
- mfn_t m4mfn;
- if ( (pg = hap_alloc(d)) == NULL )
- goto oom;
- m4mfn = page_to_mfn(pg);
- hap_install_xen_entries_in_l4(v, m4mfn);
- return m4mfn;
- }
-#elif CONFIG_PAGING_LEVELS == 3
- {
- mfn_t m3mfn, m2mfn;
- l3_pgentry_t *l3e;
- l2_pgentry_t *l2e;
- int i;
-
- if ( (pg = hap_alloc(d)) == NULL )
- goto oom;
- m3mfn = page_to_mfn(pg);
-
- /* Install a monitor l2 table in slot 3 of the l3 table.
- * This is used for all Xen entries, including linear maps
- */
- if ( (pg = hap_alloc(d)) == NULL )
- goto oom;
- m2mfn = page_to_mfn(pg);
- l3e = hap_map_domain_page(m3mfn);
- l3e[3] = l3e_from_pfn(mfn_x(m2mfn), _PAGE_PRESENT);
- hap_install_xen_entries_in_l2h(v, m2mfn);
- /* Install the monitor's own linear map */
- l2e = hap_map_domain_page(m2mfn);
- for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
- l2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
- (l3e_get_flags(l3e[i]) & _PAGE_PRESENT)
- ? l2e_from_pfn(l3e_get_pfn(l3e[i]), __PAGE_HYPERVISOR)
- : l2e_empty();
- hap_unmap_domain_page(l2e);
- hap_unmap_domain_page(l3e);
-
- HAP_PRINTK("new monitor table: %#lx\n", mfn_x(m3mfn));
- return m3mfn;
- }
-#endif
+ if ( (pg = hap_alloc(d)) == NULL )
+ goto oom;
+ m4mfn = page_to_mfn(pg);
+ hap_install_xen_entries_in_l4(v, m4mfn);
+ return m4mfn;
oom:
HAP_ERROR("out of memory building monitor pagetable\n");
@@ -541,16 +437,6 @@ static void hap_destroy_monitor_table(struct vcpu* v, mfn_t mmfn)
{
struct domain *d = v->domain;
-#if CONFIG_PAGING_LEVELS == 3
- /* Need to destroy the l2 monitor page in slot 4 too */
- {
- l3_pgentry_t *l3e = hap_map_domain_page(mmfn);
- ASSERT(l3e_get_flags(l3e[3]) & _PAGE_PRESENT);
- hap_free(d, _mfn(l3e_get_pfn(l3e[3])));
- hap_unmap_domain_page(l3e);
- }
-#endif
-
/* Put the memory back in the pool */
hap_free(d, mmfn);
}
@@ -814,47 +700,6 @@ static void hap_update_paging_modes(struct vcpu *v)
put_gfn(d, cr3_gfn);
}
-#if CONFIG_PAGING_LEVELS == 3
-static void p2m_install_entry_in_monitors(struct domain *d, l3_pgentry_t *l3e)
-/* Special case, only used for PAE hosts: update the mapping of the p2m
- * table. This is trivial in other paging modes (one top-level entry
- * points to the top-level p2m, no maintenance needed), but PAE makes
- * life difficult by needing a copy of the p2m table in eight l2h slots
- * in the monitor table. This function makes fresh copies when a p2m
- * l3e changes. */
-{
- l2_pgentry_t *ml2e;
- struct vcpu *v;
- unsigned int index;
-
- index = ((unsigned long)l3e & ~PAGE_MASK) / sizeof(l3_pgentry_t);
- ASSERT(index < MACHPHYS_MBYTES>>1);
-
- for_each_vcpu ( d, v )
- {
- if ( pagetable_get_pfn(v->arch.monitor_table) == 0 )
- continue;
-
- ASSERT(paging_mode_external(v->domain));
-
- if ( v == current ) /* OK to use linear map of monitor_table */
- ml2e = __linear_l2_table + l2_linear_offset(RO_MPT_VIRT_START);
- else {
- l3_pgentry_t *ml3e;
- ml3e = hap_map_domain_page(
- pagetable_get_mfn(v->arch.monitor_table));
- ASSERT(l3e_get_flags(ml3e[3]) & _PAGE_PRESENT);
- ml2e = hap_map_domain_page(_mfn(l3e_get_pfn(ml3e[3])));
- ml2e += l2_table_offset(RO_MPT_VIRT_START);
- hap_unmap_domain_page(ml3e);
- }
- ml2e[index] = l2e_from_pfn(l3e_get_pfn(*l3e), __PAGE_HYPERVISOR);
- if ( v != current )
- hap_unmap_domain_page(ml2e);
- }
-}
-#endif
-
static void
hap_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p,
mfn_t table_mfn, l1_pgentry_t new, unsigned int level)
@@ -886,14 +731,6 @@ hap_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p,
&& (level == 1 || (level == 2 && (old_flags & _PAGE_PSE))) )
flush_tlb_mask(d->domain_dirty_cpumask);
-#if CONFIG_PAGING_LEVELS == 3
- /* install P2M in monitor table for PAE Xen */
- if ( level == 3 )
- /* We have written to the p2m l3: need to sync the per-vcpu
- * copies of it in the monitor tables */
- p2m_install_entry_in_monitors(d, (l3_pgentry_t *)p);
-#endif
-
paging_unlock(d);
if ( flush_nestedp2m )
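
The hap.c changes above delete the PAE-only machinery (the below-4GB top-level p2m page, the l2h Xen mappings, and the per-vcpu p2m mirroring in p2m_install_entry_in_monitors()); the monitor table is now always a single l4 page filled by hap_install_xen_entries_in_l4(). For reference, a hypothetical standalone sketch of the index arithmetic behind the l*_table_offset() helpers used on that path, with the standard x86-64 constants (9 bits per level above a 4KB page; these values are assumptions of the example, not taken from the patch):

/* Hypothetical, standalone sketch of 4-level x86-64 index extraction. */
#include <stdio.h>

#define PAGE_SHIFT       12
#define PAGETABLE_ORDER   9
#define L1_SHIFT         (PAGE_SHIFT)                        /* 12 */
#define L2_SHIFT         (PAGE_SHIFT + PAGETABLE_ORDER)      /* 21 */
#define L3_SHIFT         (PAGE_SHIFT + 2 * PAGETABLE_ORDER)  /* 30 */
#define L4_SHIFT         (PAGE_SHIFT + 3 * PAGETABLE_ORDER)  /* 39 */
#define TABLE_ENTRIES    (1UL << PAGETABLE_ORDER)            /* 512 */

static unsigned int table_offset(unsigned long va, unsigned int shift)
{
    return (va >> shift) & (TABLE_ENTRIES - 1);
}

int main(void)
{
    unsigned long va = 0xffff820000000000UL;   /* arbitrary example address */

    printf("l4=%u l3=%u l2=%u l1=%u page-offset=%#lx\n",
           table_offset(va, L4_SHIFT), table_offset(va, L3_SHIFT),
           table_offset(va, L2_SHIFT), table_offset(va, L1_SHIFT),
           va & ((1UL << PAGE_SHIFT) - 1));
    return 0;
}
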
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 1ddf991208..0125fe656c 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -1120,10 +1120,6 @@ guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
if ( !paging_mode_translate(d) )
return -EINVAL;
- rc = p2m_gfn_check_limit(d, gfn, order);
- if ( rc != 0 )
- return rc;
-
gfn_lock(p2m, gfn, order);
P2M_DEBUG("mark pod gfn=%#lx\n", gfn);
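
The call removed here (and its twin in p2m.c further down) enforced the PAE-era pseudo-physical ceilings -- roughly 8GB for translated guests and 4GB with NPT, per the comment deleted from p2m-pt.c below -- which have no counterpart on a 4-level build. A hypothetical sketch of what such a gfn ceiling check amounts to (illustrative only; the real p2m_gfn_check_limit() may differ):

/* Hypothetical gfn ceiling check of the kind that is no longer needed. */
#include <stdio.h>

#define PAGE_SHIFT 12

static int gfn_within_limit(unsigned long gfn, unsigned int order,
                            unsigned long limit_bytes)
{
    unsigned long max_gfn = limit_bytes >> PAGE_SHIFT;

    return (gfn + (1UL << order)) <= max_gfn;
}

int main(void)
{
    unsigned long eight_gb = 8UL << 30;   /* 8GB == 2^21 4KB frames */

    printf("%d %d\n",
           gfn_within_limit(0x1fffff, 0, eight_gb),   /* 1: just inside  */
           gfn_within_limit(0x200000, 0, eight_gb));  /* 0: just outside */
    return 0;
}
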
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index f0d1423705..b493fceac2 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -150,10 +150,8 @@ p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
static void p2m_add_iommu_flags(l1_pgentry_t *p2m_entry,
unsigned int nlevel, unsigned int flags)
{
-#if CONFIG_PAGING_LEVELS == 4
if ( iommu_hap_pt_share )
l1e_add_flags(*p2m_entry, iommu_nlevel_to_flags(nlevel, flags));
-#endif
}
static int
@@ -189,10 +187,6 @@ p2m_next_level(struct p2m_domain *p2m, mfn_t *table_mfn, void **table,
p2m->write_p2m_entry(p2m, gfn, p2m_entry, *table_mfn, new_entry, 4);
break;
case PGT_l2_page_table:
-#if CONFIG_PAGING_LEVELS == 3
- /* for PAE mode, PDPE only has PCD/PWT/P bits available */
- new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)), _PAGE_PRESENT);
-#endif
p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
p2m->write_p2m_entry(p2m, gfn, p2m_entry, *table_mfn, new_entry, 3);
break;
@@ -317,12 +311,11 @@ p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
__trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), &t);
}
-#if CONFIG_PAGING_LEVELS >= 4
if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
L4_PAGETABLE_SHIFT - PAGE_SHIFT,
L4_PAGETABLE_ENTRIES, PGT_l3_page_table) )
goto out;
-#endif
+
/*
* Try to allocate 1GB page table if this feature is supported.
*/
@@ -361,18 +354,9 @@ p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
if ( l1e_get_flags(old_entry) & _PAGE_PRESENT )
p2m_free_entry(p2m, &old_entry, page_order);
}
- /*
- * When using PAE Xen, we only allow 33 bits of pseudo-physical
- * address in translated guests (i.e. 8 GBytes). This restriction
- * comes from wanting to map the P2M table into the 16MB RO_MPT hole
- * in Xen's address space for translated PV guests.
- * When using AMD's NPT on PAE Xen, we are restricted to 4GB.
- */
else if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
L3_PAGETABLE_SHIFT - PAGE_SHIFT,
- ((CONFIG_PAGING_LEVELS == 3)
- ? (hap_enabled(p2m->domain) ? 4 : 8)
- : L3_PAGETABLE_ENTRIES),
+ L3_PAGETABLE_ENTRIES,
PGT_l2_page_table) )
goto out;
@@ -493,15 +477,12 @@ static mfn_t p2m_gfn_to_mfn_current(struct p2m_domain *p2m,
l1_pgentry_t l1e = l1e_empty(), *p2m_entry;
l2_pgentry_t l2e = l2e_empty();
- int ret;
-#if CONFIG_PAGING_LEVELS >= 4
l3_pgentry_t l3e = l3e_empty();
-#endif
+ int ret;
ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START)
/ sizeof(l1_pgentry_t));
-#if CONFIG_PAGING_LEVELS >= 4
/*
* Read & process L3
*/
@@ -549,7 +530,7 @@ pod_retry_l3:
*page_order = PAGE_ORDER_1G;
goto out;
}
-#endif
+
/*
* Read & process L2
*/
@@ -691,7 +672,6 @@ p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn,
mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
-#if CONFIG_PAGING_LEVELS >= 4
{
l4_pgentry_t *l4e = map_domain_page(mfn_x(mfn));
l4e += l4_table_offset(addr);
@@ -703,19 +683,9 @@ p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn,
mfn = _mfn(l4e_get_pfn(*l4e));
unmap_domain_page(l4e);
}
-#endif
{
l3_pgentry_t *l3e = map_domain_page(mfn_x(mfn));
-#if CONFIG_PAGING_LEVELS == 3
- /* On PAE hosts the p2m has eight l3 entries, not four (see
- * shadow_set_p2m_entry()) so we can't use l3_table_offset.
- * Instead, just count the number of l3es from zero. It's safe
- * to do this because we already checked that the gfn is within
- * the bounds of the p2m. */
- l3e += (addr >> L3_PAGETABLE_SHIFT);
-#else
l3e += l3_table_offset(addr);
-#endif
pod_retry_l3:
if ( (l3e_get_flags(*l3e) & _PAGE_PRESENT) == 0 )
{
@@ -828,10 +798,8 @@ static void p2m_change_type_global(struct p2m_domain *p2m,
mfn_t l1mfn, l2mfn, l3mfn;
unsigned long i1, i2, i3;
l3_pgentry_t *l3e;
-#if CONFIG_PAGING_LEVELS == 4
l4_pgentry_t *l4e;
unsigned long i4;
-#endif /* CONFIG_PAGING_LEVELS == 4 */
BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));
BUG_ON(ot != nt && (ot == p2m_mmio_direct || nt == p2m_mmio_direct));
@@ -844,14 +812,8 @@ static void p2m_change_type_global(struct p2m_domain *p2m,
ASSERT(p2m_locked_by_me(p2m));
-#if CONFIG_PAGING_LEVELS == 4
l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
-#else /* CONFIG_PAGING_LEVELS == 3 */
- l3mfn = _mfn(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
- l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
-#endif
-#if CONFIG_PAGING_LEVELS >= 4
for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
{
if ( !(l4e_get_flags(l4e[i4]) & _PAGE_PRESENT) )
@@ -860,9 +822,8 @@ static void p2m_change_type_global(struct p2m_domain *p2m,
}
l3mfn = _mfn(l4e_get_pfn(l4e[i4]));
l3e = map_domain_page(l4e_get_pfn(l4e[i4]));
-#endif
for ( i3 = 0;
- i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
+ i3 < L3_PAGETABLE_ENTRIES;
i3++ )
{
if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
@@ -901,11 +862,7 @@ static void p2m_change_type_global(struct p2m_domain *p2m,
mfn = l2e_get_pfn(l2e[i2]);
/* Do not use get_gpfn_from_mfn because it may return
SHARED_M2P_ENTRY */
- gfn = (i2 + (i3
-#if CONFIG_PAGING_LEVELS >= 4
- + (i4 * L3_PAGETABLE_ENTRIES)
-#endif
- )
+ gfn = (i2 + (i3 + (i4 * L3_PAGETABLE_ENTRIES))
* L2_PAGETABLE_ENTRIES) * L1_PAGETABLE_ENTRIES;
flags = p2m_type_to_flags(nt, _mfn(mfn));
l1e_content = l1e_from_pfn(mfn, flags | _PAGE_PSE);
@@ -924,12 +881,8 @@ static void p2m_change_type_global(struct p2m_domain *p2m,
if ( p2m_flags_to_type(flags) != ot )
continue;
mfn = l1e_get_pfn(l1e[i1]);
- gfn = i1 + (i2 + (i3
-#if CONFIG_PAGING_LEVELS >= 4
- + (i4 * L3_PAGETABLE_ENTRIES)
-#endif
- )
- * L2_PAGETABLE_ENTRIES) * L1_PAGETABLE_ENTRIES;
+ gfn = i1 + (i2 + (i3 + (i4 * L3_PAGETABLE_ENTRIES))
+ * L2_PAGETABLE_ENTRIES) * L1_PAGETABLE_ENTRIES;
/* create a new 1le entry with the new type */
flags = p2m_type_to_flags(nt, _mfn(mfn));
l1e_content = p2m_l1e_from_pfn(mfn, flags);
@@ -940,17 +893,10 @@ static void p2m_change_type_global(struct p2m_domain *p2m,
}
unmap_domain_page(l2e);
}
-#if CONFIG_PAGING_LEVELS >= 4
unmap_domain_page(l3e);
}
-#endif
-#if CONFIG_PAGING_LEVELS == 4
unmap_domain_page(l4e);
-#else /* CONFIG_PAGING_LEVELS == 3 */
- unmap_domain_page(l3e);
-#endif
-
}
#if P2M_AUDIT
@@ -976,19 +922,12 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
l1_pgentry_t *l1e;
int i1, i2;
-#if CONFIG_PAGING_LEVELS == 4
l4_pgentry_t *l4e;
l3_pgentry_t *l3e;
int i4, i3;
l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
-#else /* CONFIG_PAGING_LEVELS == 3 */
- l3_pgentry_t *l3e;
- int i3;
- l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
-#endif
gfn = 0;
-#if CONFIG_PAGING_LEVELS >= 4
for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
{
if ( !(l4e_get_flags(l4e[i4]) & _PAGE_PRESENT) )
@@ -997,9 +936,8 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
continue;
}
l3e = map_domain_page(mfn_x(_mfn(l4e_get_pfn(l4e[i4]))));
-#endif
for ( i3 = 0;
- i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
+ i3 < L3_PAGETABLE_ENTRIES;
i3++ )
{
if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
@@ -1101,17 +1039,10 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
}
unmap_domain_page(l2e);
}
-#if CONFIG_PAGING_LEVELS >= 4
unmap_domain_page(l3e);
}
-#endif
-#if CONFIG_PAGING_LEVELS == 4
unmap_domain_page(l4e);
-#else /* CONFIG_PAGING_LEVELS == 3 */
- unmap_domain_page(l3e);
-#endif
-
}
if ( entry_count != p2m->pod.entry_count )
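
With only the 4-level layout left, p2m_change_type_global() and the audit code walk a fixed l4/l3/l2/l1 hierarchy of 512-entry tables, and the gfn is rebuilt from the per-level indices with the nested multiply-add kept above. A hypothetical standalone check that this is equivalent to packing four 9-bit indices:

/* Hypothetical check of the gfn reconstruction used above. */
#include <assert.h>
#include <stdio.h>

#define L1_ENTRIES 512UL
#define L2_ENTRIES 512UL
#define L3_ENTRIES 512UL

int main(void)
{
    unsigned long i4 = 3, i3 = 17, i2 = 511, i1 = 42;

    unsigned long gfn = i1 + (i2 + (i3 + i4 * L3_ENTRIES)
                              * L2_ENTRIES) * L1_ENTRIES;

    /* Equivalent bit-packing view: 9 bits per level. */
    assert(gfn == ((i4 << 27) | (i3 << 18) | (i2 << 9) | i1));

    printf("gfn = %#lx\n", gfn);
    return 0;
}
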
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index e84549f932..f6e0352bf3 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -343,14 +343,7 @@ int p2m_alloc_table(struct p2m_domain *p2m)
P2M_PRINTK("allocating p2m table\n");
- p2m_top = p2m_alloc_ptp(p2m,
-#if CONFIG_PAGING_LEVELS == 4
- PGT_l4_page_table
-#else
- PGT_l3_page_table
-#endif
- );
-
+ p2m_top = p2m_alloc_ptp(p2m, PGT_l4_page_table);
if ( p2m_top == NULL )
{
p2m_unlock(p2m);
@@ -545,10 +538,6 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
return 0;
}
- rc = p2m_gfn_check_limit(d, gfn, page_order);
- if ( rc != 0 )
- return rc;
-
p2m_lock(p2m);
P2M_DEBUG("adding gfn=%#lx mfn=%#lx\n", gfn, mfn);
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 2039390b1f..760e9b516f 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -566,10 +566,8 @@ static inline void _sh_resync_l1(struct vcpu *v, mfn_t gmfn, mfn_t snpmfn)
SHADOW_INTERNAL_NAME(sh_resync_l1, 2)(v, gmfn, snpmfn);
else if ( pg->shadow_flags & SHF_L1_PAE )
SHADOW_INTERNAL_NAME(sh_resync_l1, 3)(v, gmfn, snpmfn);
-#if CONFIG_PAGING_LEVELS >= 4
else if ( pg->shadow_flags & SHF_L1_64 )
SHADOW_INTERNAL_NAME(sh_resync_l1, 4)(v, gmfn, snpmfn);
-#endif
}
@@ -872,10 +870,8 @@ static int sh_skip_sync(struct vcpu *v, mfn_t gl1mfn)
return SHADOW_INTERNAL_NAME(sh_safe_not_to_sync, 2)(v, gl1mfn);
else if ( pg->shadow_flags & SHF_L1_PAE )
return SHADOW_INTERNAL_NAME(sh_safe_not_to_sync, 3)(v, gl1mfn);
-#if CONFIG_PAGING_LEVELS >= 4
else if ( pg->shadow_flags & SHF_L1_64 )
return SHADOW_INTERNAL_NAME(sh_safe_not_to_sync, 4)(v, gl1mfn);
-#endif
SHADOW_ERROR("gmfn 0x%lx was OOS but not shadowed as an l1.\n",
mfn_x(gl1mfn));
BUG();
@@ -1083,7 +1079,6 @@ sh_validate_guest_entry(struct vcpu *v, mfn_t gmfn, void *entry, u32 size)
result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 3)
(v, gmfn, entry, size);
-#if CONFIG_PAGING_LEVELS >= 4
if ( page->shadow_flags & SHF_L1_64 )
result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 4)
(v, gmfn, entry, size);
@@ -1099,10 +1094,7 @@ sh_validate_guest_entry(struct vcpu *v, mfn_t gmfn, void *entry, u32 size)
if ( page->shadow_flags & SHF_L4_64 )
result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl4e, 4)
(v, gmfn, entry, size);
-#else /* 32-bit hypervisor does not support 64-bit guests */
- ASSERT((page->shadow_flags
- & (SHF_L4_64|SHF_L3_64|SHF_L2H_64|SHF_L2_64|SHF_L1_64)) == 0);
-#endif
+
this_cpu(trace_shadow_path_flags) |= (result<<(TRCE_SFLAG_SET_CHANGED));
return result;
@@ -1265,11 +1257,9 @@ void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn, int user_only)
case SH_type_l2h_pae_shadow:
SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings, 3)(v, smfn, user_only);
break;
-#if CONFIG_PAGING_LEVELS >= 4
case SH_type_l4_64_shadow:
SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings, 4)(v, smfn, user_only);
break;
-#endif
default:
SHADOW_ERROR("top-level shadow has bad type %08x\n", sp->u.sh.type);
BUG();
@@ -1647,51 +1637,6 @@ shadow_free_p2m_page(struct domain *d, struct page_info *pg)
paging_unlock(d);
}
-#if CONFIG_PAGING_LEVELS == 3
-static void p2m_install_entry_in_monitors(struct domain *d,
- l3_pgentry_t *l3e)
-/* Special case, only used for external-mode domains on PAE hosts:
- * update the mapping of the p2m table. Once again, this is trivial in
- * other paging modes (one top-level entry points to the top-level p2m,
- * no maintenance needed), but PAE makes life difficult by needing a
- * copy the eight l3es of the p2m table in eight l2h slots in the
- * monitor table. This function makes fresh copies when a p2m l3e
- * changes. */
-{
- l2_pgentry_t *ml2e;
- struct vcpu *v;
- unsigned int index;
-
- index = ((unsigned long)l3e & ~PAGE_MASK) / sizeof(l3_pgentry_t);
- ASSERT(index < MACHPHYS_MBYTES>>1);
-
- for_each_vcpu(d, v)
- {
- if ( pagetable_get_pfn(v->arch.monitor_table) == 0 )
- continue;
- ASSERT(shadow_mode_external(v->domain));
-
- SHADOW_DEBUG(P2M, "d=%u v=%u index=%u mfn=%#lx\n",
- d->domain_id, v->vcpu_id, index, l3e_get_pfn(*l3e));
-
- if ( v == current ) /* OK to use linear map of monitor_table */
- ml2e = __linear_l2_table + l2_linear_offset(RO_MPT_VIRT_START);
- else
- {
- l3_pgentry_t *ml3e;
- ml3e = sh_map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
- ASSERT(l3e_get_flags(ml3e[3]) & _PAGE_PRESENT);
- ml2e = sh_map_domain_page(_mfn(l3e_get_pfn(ml3e[3])));
- ml2e += l2_table_offset(RO_MPT_VIRT_START);
- sh_unmap_domain_page(ml3e);
- }
- ml2e[index] = l2e_from_pfn(l3e_get_pfn(*l3e), __PAGE_HYPERVISOR);
- if ( v != current )
- sh_unmap_domain_page(ml2e);
- }
-}
-#endif
-
/* Set the pool of shadow pages to the required number of pages.
* Input will be rounded up to at least shadow_min_acceptable_pages(),
* plus space for the p2m table.
@@ -2141,7 +2086,6 @@ void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3)(v, smfn);
break;
-#if CONFIG_PAGING_LEVELS >= 4
case SH_type_l1_64_shadow:
case SH_type_fl1_64_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4)(v, smfn);
@@ -2158,7 +2102,7 @@ void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
case SH_type_l4_64_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, 4)(v, smfn);
break;
-#endif
+
default:
SHADOW_ERROR("tried to destroy shadow of bad type %08lx\n",
(unsigned long)t);
@@ -2197,13 +2141,8 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 3), /* fl1_pae */
NULL, /* l2_pae */
NULL, /* l2h_pae */
-#if CONFIG_PAGING_LEVELS >= 4
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4), /* l1_64 */
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4), /* fl1_64 */
-#else
- NULL, /* l1_64 */
- NULL, /* fl1_64 */
-#endif
NULL, /* l2_64 */
NULL, /* l2h_64 */
NULL, /* l3_64 */
@@ -2310,7 +2249,6 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
+ ((fault_addr & VADDR_MASK) >> 18), 6); break;
}
}
-#if CONFIG_PAGING_LEVELS >= 4
else if ( v->arch.paging.mode->guest_levels == 4 )
{
/* 64bit w2k3: linear map at 0xfffff68000000000 */
@@ -2351,7 +2289,6 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
/* FreeBSD 64bit: direct map at 0xffffff0000000000 */
GUESS(0xffffff0000000000 + (gfn << PAGE_SHIFT), 6);
}
-#endif /* CONFIG_PAGING_LEVELS >= 4 */
#undef GUESS
}
@@ -2423,18 +2360,14 @@ int sh_remove_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,2)
(v, gmfn, smfn, off);
}
-#if CONFIG_PAGING_LEVELS >= 3
else if ( sp->u.sh.type == SH_type_l1_pae_shadow
|| sp->u.sh.type == SH_type_fl1_pae_shadow )
return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,3)
(v, gmfn, smfn, off);
-#if CONFIG_PAGING_LEVELS >= 4
else if ( sp->u.sh.type == SH_type_l1_64_shadow
|| sp->u.sh.type == SH_type_fl1_64_shadow )
return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,4)
(v, gmfn, smfn, off);
-#endif
-#endif
return 0;
}
@@ -2458,13 +2391,8 @@ int sh_remove_all_mappings(struct vcpu *v, mfn_t gmfn)
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 3), /* fl1_pae */
NULL, /* l2_pae */
NULL, /* l2h_pae */
-#if CONFIG_PAGING_LEVELS >= 4
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4), /* l1_64 */
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4), /* fl1_64 */
-#else
- NULL, /* l1_64 */
- NULL, /* fl1_64 */
-#endif
NULL, /* l2_64 */
NULL, /* l2h_64 */
NULL, /* l3_64 */
@@ -2560,7 +2488,6 @@ static int sh_remove_shadow_via_pointer(struct vcpu *v, mfn_t smfn)
case SH_type_l2h_pae_shadow:
SHADOW_INTERNAL_NAME(sh_clear_shadow_entry, 3)(v, vaddr, pmfn);
break;
-#if CONFIG_PAGING_LEVELS >= 4
case SH_type_l1_64_shadow:
case SH_type_l2_64_shadow:
case SH_type_l2h_64_shadow:
@@ -2568,7 +2495,6 @@ static int sh_remove_shadow_via_pointer(struct vcpu *v, mfn_t smfn)
case SH_type_l4_64_shadow:
SHADOW_INTERNAL_NAME(sh_clear_shadow_entry, 4)(v, vaddr, pmfn);
break;
-#endif
default: BUG(); /* Some wierd unknown shadow type */
}
@@ -2607,17 +2533,10 @@ void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 3), /* l2h_pae */
NULL, /* l1_64 */
NULL, /* fl1_64 */
-#if CONFIG_PAGING_LEVELS >= 4
SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4), /* l2_64 */
SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4), /* l2h_64 */
SHADOW_INTERNAL_NAME(sh_remove_l2_shadow, 4), /* l3_64 */
SHADOW_INTERNAL_NAME(sh_remove_l3_shadow, 4), /* l4_64 */
-#else
- NULL, /* l2_64 */
- NULL, /* l2h_64 */
- NULL, /* l3_64 */
- NULL, /* l4_64 */
-#endif
NULL, /* p2m */
NULL /* unused */
};
@@ -2697,13 +2616,11 @@ void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
DO_UNSHADOW(SH_type_l2h_pae_shadow);
DO_UNSHADOW(SH_type_l2_pae_shadow);
DO_UNSHADOW(SH_type_l1_pae_shadow);
-#if CONFIG_PAGING_LEVELS >= 4
DO_UNSHADOW(SH_type_l4_64_shadow);
DO_UNSHADOW(SH_type_l3_64_shadow);
DO_UNSHADOW(SH_type_l2h_64_shadow);
DO_UNSHADOW(SH_type_l2_64_shadow);
DO_UNSHADOW(SH_type_l1_64_shadow);
-#endif
#undef DO_UNSHADOW
@@ -2741,7 +2658,6 @@ sh_remove_all_shadows_and_parents(struct vcpu *v, mfn_t gmfn)
/**************************************************************************/
-#if CONFIG_PAGING_LEVELS >= 4
/* Reset the up-pointers of every L3 shadow to 0.
* This is called when l3 shadows stop being pinnable, to clear out all
* the list-head bits so the up-pointer field is properly inititalised. */
@@ -2750,7 +2666,6 @@ static int sh_clear_up_pointer(struct vcpu *v, mfn_t smfn, mfn_t unused)
mfn_to_page(smfn)->up = 0;
return 0;
}
-#endif
void sh_reset_l3_up_pointers(struct vcpu *v)
{
@@ -2767,11 +2682,7 @@ void sh_reset_l3_up_pointers(struct vcpu *v)
NULL, /* fl1_64 */
NULL, /* l2_64 */
NULL, /* l2h_64 */
-#if CONFIG_PAGING_LEVELS >= 4
sh_clear_up_pointer, /* l3_64 */
-#else
- NULL, /* l3_64 */
-#endif
NULL, /* l4_64 */
NULL, /* p2m */
NULL /* unused */
@@ -2838,11 +2749,7 @@ static void sh_update_paging_modes(struct vcpu *v)
///
/// PV guest
///
-#if CONFIG_PAGING_LEVELS == 4
v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 4);
-#else /* CONFIG_PAGING_LEVELS == 3 */
- v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
-#endif
}
else
{
@@ -3325,11 +3232,9 @@ static int shadow_one_bit_disable(struct domain *d, u32 mode)
{
if ( v->arch.paging.mode )
v->arch.paging.mode->shadow.detach_old_tables(v);
-#if CONFIG_PAGING_LEVELS == 4
if ( !(v->arch.flags & TF_kernel_mode) )
make_cr3(v, pagetable_get_pfn(v->arch.guest_table_user));
else
-#endif
make_cr3(v, pagetable_get_pfn(v->arch.guest_table));
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
@@ -3481,14 +3386,6 @@ shadow_write_p2m_entry(struct vcpu *v, unsigned long gfn,
/* Update the entry with new content */
safe_write_pte(p, new);
- /* install P2M in monitors for PAE Xen */
-#if CONFIG_PAGING_LEVELS == 3
- if ( level == 3 )
- /* We have written to the p2m l3: need to sync the per-vcpu
- * copies of it in the monitor tables */
- p2m_install_entry_in_monitors(d, (l3_pgentry_t *)p);
-#endif
-
#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
/* If we're doing FAST_FAULT_PATH, then shadow mode may have
cached the fact that this is an mmio region in the shadow
@@ -3815,14 +3712,12 @@ void shadow_audit_tables(struct vcpu *v)
SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 3), /* fl1_pae */
SHADOW_INTERNAL_NAME(sh_audit_l2_table, 3), /* l2_pae */
SHADOW_INTERNAL_NAME(sh_audit_l2_table, 3), /* l2h_pae */
-#if CONFIG_PAGING_LEVELS >= 4
SHADOW_INTERNAL_NAME(sh_audit_l1_table, 4), /* l1_64 */
SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 4), /* fl1_64 */
SHADOW_INTERNAL_NAME(sh_audit_l2_table, 4), /* l2_64 */
SHADOW_INTERNAL_NAME(sh_audit_l2_table, 4), /* l2h_64 */
SHADOW_INTERNAL_NAME(sh_audit_l3_table, 4), /* l3_64 */
SHADOW_INTERNAL_NAME(sh_audit_l4_table, 4), /* l4_64 */
-#endif /* CONFIG_PAGING_LEVELS >= 4 */
NULL /* All the rest */
};
unsigned int mask;
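
Many of the shadow/common.c hunks trim per-shadow-type callback tables (sh_remove_write_access(), sh_remove_all_mappings(), sh_remove_shadows(), sh_reset_l3_up_pointers(), shadow_audit_tables()): the 64-bit slots are now filled unconditionally instead of being NULL on 3-level builds. A hypothetical standalone illustration of that dispatch pattern (types and handlers invented for the example, not Xen code):

/* One callback slot per shadow type; NULL means "nothing to do". */
#include <stdio.h>

enum sh_type { SHT_none, SHT_l1_pae, SHT_l1_64, SHT_max };

typedef int (*sh_hash_cb_t)(unsigned long smfn);

static int rm_write_from_pae_l1(unsigned long smfn)
{ printf("PAE l1 shadow %#lx\n", smfn); return 0; }

static int rm_write_from_64_l1(unsigned long smfn)
{ printf("64-bit l1 shadow %#lx\n", smfn); return 0; }

static const sh_hash_cb_t callbacks[SHT_max] = {
    [SHT_none]   = NULL,
    [SHT_l1_pae] = rm_write_from_pae_l1,
    [SHT_l1_64]  = rm_write_from_64_l1,   /* always present after this patch */
};

int main(void)
{
    for ( unsigned int t = 0; t < SHT_max; t++ )
        if ( callbacks[t] )
            callbacks[t](0x1000 + t);
    return 0;
}
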
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index f23be9c392..e7667e74e7 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -788,22 +788,10 @@ static inline void safe_write_entry(void *dst, void *src)
volatile unsigned long *d = dst;
unsigned long *s = src;
ASSERT(!((unsigned long) d & (sizeof (shadow_l1e_t) - 1)));
-#if CONFIG_PAGING_LEVELS == 3
- /* In PAE mode, pagetable entries are larger
- * than machine words, so won't get written atomically. We need to make
- * sure any other cpu running on these shadows doesn't see a
- * half-written entry. Do this by marking the entry not-present first,
- * then writing the high word before the low word. */
- BUILD_BUG_ON(sizeof (shadow_l1e_t) != 2 * sizeof (unsigned long));
- d[0] = 0;
- d[1] = s[1];
- d[0] = s[0];
-#else
/* In 64-bit, sizeof(pte) == sizeof(ulong) == 1 word,
* which will be an atomic write, since the entry is aligned. */
BUILD_BUG_ON(sizeof (shadow_l1e_t) != sizeof (unsigned long));
*d = *s;
-#endif
}
@@ -1444,7 +1432,7 @@ do { \
// probably wants to wait until the shadow types have been moved from
// shadow-types.h to shadow-private.h
//
-#if CONFIG_PAGING_LEVELS == 4 && GUEST_PAGING_LEVELS == 4
+#if GUEST_PAGING_LEVELS == 4
void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn)
{
struct domain *d = v->domain;
@@ -1496,7 +1484,7 @@ void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn)
}
#endif
-#if CONFIG_PAGING_LEVELS >= 3 && GUEST_PAGING_LEVELS >= 3
+#if GUEST_PAGING_LEVELS >= 3
// For 3-on-3 PV guests, we need to make sure the xen mappings are in
// place, which means that we need to populate the l2h entry in the l3
// table.
@@ -1505,62 +1493,13 @@ static void sh_install_xen_entries_in_l2h(struct vcpu *v, mfn_t sl2hmfn)
{
struct domain *d = v->domain;
shadow_l2e_t *sl2e;
-#if CONFIG_PAGING_LEVELS == 3
- int i;
-#else
if ( !is_pv_32on64_vcpu(v) )
return;
-#endif
sl2e = sh_map_domain_page(sl2hmfn);
ASSERT(sl2e != NULL);
ASSERT(sizeof (l2_pgentry_t) == sizeof (shadow_l2e_t));
-
-#if CONFIG_PAGING_LEVELS == 3
-
- /* Copy the common Xen mappings from the idle domain */
- memcpy(&sl2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
- &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
- L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
-
- /* Install the per-domain mappings for this domain */
- for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
- sl2e[shadow_l2_table_offset(PERDOMAIN_VIRT_START) + i] =
- shadow_l2e_from_mfn(
- page_to_mfn(perdomain_pt_page(d, i)),
- __PAGE_HYPERVISOR);
-
- /* We don't set up a linear mapping here because we can't until this
- * l2h is installed in an l3e. sh_update_linear_entries() handles
- * the linear mappings when CR3 (and so the fourth l3e) is loaded.
- * We zero them here, just as a safety measure.
- */
- for ( i = 0; i < SHADOW_L3_PAGETABLE_ENTRIES; i++ )
- sl2e[shadow_l2_table_offset(LINEAR_PT_VIRT_START) + i] =
- shadow_l2e_empty();
- for ( i = 0; i < SHADOW_L3_PAGETABLE_ENTRIES; i++ )
- sl2e[shadow_l2_table_offset(SH_LINEAR_PT_VIRT_START) + i] =
- shadow_l2e_empty();
-
- if ( shadow_mode_translate(d) )
- {
- /* Install the domain-specific p2m table */
- l3_pgentry_t *p2m;
- ASSERT(pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d))) != 0);
- p2m = sh_map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))));
- for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
- {
- sl2e[shadow_l2_table_offset(RO_MPT_VIRT_START) + i] =
- (l3e_get_flags(p2m[i]) & _PAGE_PRESENT)
- ? shadow_l2e_from_mfn(_mfn(l3e_get_pfn(p2m[i])),
- __PAGE_HYPERVISOR)
- : shadow_l2e_empty();
- }
- sh_unmap_domain_page(p2m);
- }
-
-#else
/* Copy the common Xen mappings from the idle domain */
memcpy(
@@ -1568,16 +1507,11 @@ static void sh_install_xen_entries_in_l2h(struct vcpu *v, mfn_t sl2hmfn)
&compat_idle_pg_table_l2[l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*sl2e));
-#endif
-
sh_unmap_domain_page(sl2e);
}
#endif
-
-
-
/**************************************************************************/
/* Create a shadow of a given guest page.
*/
@@ -1633,11 +1567,11 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
{
switch (shadow_type)
{
-#if CONFIG_PAGING_LEVELS == 4 && GUEST_PAGING_LEVELS == 4
+#if GUEST_PAGING_LEVELS == 4
case SH_type_l4_shadow:
sh_install_xen_entries_in_l4(v, gmfn, smfn); break;
#endif
-#if CONFIG_PAGING_LEVELS >= 3 && GUEST_PAGING_LEVELS >= 3
+#if GUEST_PAGING_LEVELS >= 3
case SH_type_l2h_shadow:
sh_install_xen_entries_in_l2h(v, smfn); break;
#endif
@@ -1677,7 +1611,6 @@ sh_make_monitor_table(struct vcpu *v)
/* Guarantee we can get the memory we need */
shadow_prealloc(d, SH_type_monitor_table, CONFIG_PAGING_LEVELS);
-#if CONFIG_PAGING_LEVELS == 4
{
mfn_t m4mfn;
m4mfn = shadow_alloc(d, SH_type_monitor_table, 0);
@@ -1726,43 +1659,6 @@ sh_make_monitor_table(struct vcpu *v)
#endif /* SHADOW_PAGING_LEVELS < 4 */
return m4mfn;
}
-
-#elif CONFIG_PAGING_LEVELS == 3
-
- {
- mfn_t m3mfn, m2mfn;
- l3_pgentry_t *l3e;
- l2_pgentry_t *l2e;
- int i;
-
- m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
- /* Remember the level of this table */
- mfn_to_page(m3mfn)->shadow_flags = 3;
-
- // Install a monitor l2 table in slot 3 of the l3 table.
- // This is used for all Xen entries, including linear maps
- m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
- mfn_to_page(m2mfn)->shadow_flags = 2;
- l3e = sh_map_domain_page(m3mfn);
- l3e[3] = l3e_from_pfn(mfn_x(m2mfn), _PAGE_PRESENT);
- sh_install_xen_entries_in_l2h(v, m2mfn);
- /* Install the monitor's own linear map */
- l2e = sh_map_domain_page(m2mfn);
- for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
- l2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
- (l3e_get_flags(l3e[i]) & _PAGE_PRESENT)
- ? l2e_from_pfn(l3e_get_pfn(l3e[i]), __PAGE_HYPERVISOR)
- : l2e_empty();
- sh_unmap_domain_page(l2e);
- sh_unmap_domain_page(l3e);
-
- SHADOW_PRINTK("new monitor table: %#lx\n", mfn_x(m3mfn));
- return m3mfn;
- }
-
-#else
-#error this should not happen
-#endif /* CONFIG_PAGING_LEVELS */
}
#endif /* SHADOW_PAGING_LEVELS == GUEST_PAGING_LEVELS */
@@ -2146,7 +2042,7 @@ void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
struct domain *d = v->domain;
ASSERT(mfn_to_page(mmfn)->u.sh.type == SH_type_monitor_table);
-#if (CONFIG_PAGING_LEVELS == 4) && (SHADOW_PAGING_LEVELS != 4)
+#if SHADOW_PAGING_LEVELS != 4
{
mfn_t m3mfn;
l4_pgentry_t *l4e = sh_map_domain_page(mmfn);
@@ -2177,14 +2073,6 @@ void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
}
sh_unmap_domain_page(l4e);
}
-#elif CONFIG_PAGING_LEVELS == 3
- /* Need to destroy the l2 monitor page in slot 4 too */
- {
- l3_pgentry_t *l3e = sh_map_domain_page(mmfn);
- ASSERT(l3e_get_flags(l3e[3]) & _PAGE_PRESENT);
- shadow_free(d, _mfn(l3e_get_pfn(l3e[3])));
- sh_unmap_domain_page(l3e);
- }
#endif
/* Put the memory back in the pool */
@@ -2382,46 +2270,6 @@ static int validate_gl2e(struct vcpu *v, void *new_ge, mfn_t sl2mfn, void *se)
}
l2e_propagate_from_guest(v, new_gl2e, sl1mfn, &new_sl2e, ft_prefetch);
- // check for updates to xen reserved slots in PV guests...
- // XXX -- need to revisit this for PV 3-on-4 guests.
- //
-#if SHADOW_PAGING_LEVELS < 4
-#if CONFIG_PAGING_LEVELS == SHADOW_PAGING_LEVELS
- if ( !shadow_mode_external(v->domain) )
- {
- int shadow_index = (((unsigned long)sl2p & ~PAGE_MASK) /
- sizeof(shadow_l2e_t));
- int reserved_xen_slot;
-
-#if SHADOW_PAGING_LEVELS == 3
- reserved_xen_slot =
- ((mfn_to_page(sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow) &&
- (shadow_index
- >= (L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1))));
-#else /* SHADOW_PAGING_LEVELS == 2 */
- reserved_xen_slot = (shadow_index >= L2_PAGETABLE_FIRST_XEN_SLOT);
-#endif
-
- if ( unlikely(reserved_xen_slot) )
- {
- // attempt by the guest to write to a xen reserved slot
- //
- SHADOW_PRINTK("%s out-of-range update "
- "sl2mfn=%05lx index=0x%x val=%" SH_PRI_pte "\n",
- __func__, mfn_x(sl2mfn), shadow_index, new_sl2e.l2);
- if ( shadow_l2e_get_flags(new_sl2e) & _PAGE_PRESENT )
- {
- SHADOW_ERROR("out-of-range l2e update\n");
- result |= SHADOW_SET_ERROR;
- }
-
- // do not call shadow_set_l2e...
- return result;
- }
- }
-#endif /* CONFIG_PAGING_LEVELS == SHADOW_PAGING_LEVELS */
-#endif /* SHADOW_PAGING_LEVELS < 4 */
-
result |= shadow_set_l2e(v, sl2p, new_sl2e, sl2mfn);
return result;
@@ -3836,7 +3684,7 @@ sh_update_linear_entries(struct vcpu *v)
&& pagetable_get_pfn(v->arch.monitor_table) == 0 )
return;
-#if (CONFIG_PAGING_LEVELS == 4) && (SHADOW_PAGING_LEVELS == 4)
+#if SHADOW_PAGING_LEVELS == 4
/* For PV, one l4e points at the guest l4, one points at the shadow
* l4. No maintenance required.
@@ -3862,7 +3710,7 @@ sh_update_linear_entries(struct vcpu *v)
}
}
-#elif (CONFIG_PAGING_LEVELS == 4) && (SHADOW_PAGING_LEVELS == 3)
+#elif SHADOW_PAGING_LEVELS == 3
/* PV: XXX
*
@@ -3923,102 +3771,6 @@ sh_update_linear_entries(struct vcpu *v)
else
domain_crash(d); /* XXX */
-#elif CONFIG_PAGING_LEVELS == 3
-
- /* PV: need to copy the guest's l3 entries into the guest-linear-map l2
- * entries in the shadow, and the shadow's l3 entries into the
- * shadow-linear-map l2 entries in the shadow. This is safe to do
- * because Xen does not let guests share high-slot l2 tables between l3s,
- * so we know we're not treading on anyone's toes.
- *
- * HVM: need to copy the shadow's l3 entries into the
- * shadow-linear-map l2 entries in the monitor table. This is safe
- * because we have one monitor table for each vcpu. The monitor's
- * own l3es don't need to be copied because they never change.
- * XXX That might change if we start stuffing things into the rest
- * of the monitor's virtual address space.
- */
- {
- l2_pgentry_t *l2e, new_l2e;
- shadow_l3e_t *guest_l3e = NULL, *shadow_l3e;
- int i;
- int unmap_l2e = 0;
-
-#if GUEST_PAGING_LEVELS == 2
-
- /* Shadow l3 tables were built by sh_update_cr3 */
- BUG_ON(!shadow_mode_external(d)); /* PV 2-on-3 is unsupported */
- shadow_l3e = (shadow_l3e_t *)&v->arch.paging.shadow.l3table;
-
-#else /* GUEST_PAGING_LEVELS == 3 */
-
- shadow_l3e = (shadow_l3e_t *)&v->arch.paging.shadow.l3table;
- guest_l3e = (guest_l3e_t *)&v->arch.paging.shadow.gl3e;
-
-#endif /* GUEST_PAGING_LEVELS */
-
- /* Choose where to write the entries, using linear maps if possible */
- if ( shadow_mode_external(d) )
- {
- if ( v == current )
- {
- /* From the monitor tables, it's safe to use linear maps
- * to update monitor l2s */
- l2e = __linear_l2_table + (3 * L2_PAGETABLE_ENTRIES);
- }
- else
- {
- /* Map the monitor table's high l2 */
- l3_pgentry_t *l3e;
- l3e = sh_map_domain_page(
- pagetable_get_mfn(v->arch.monitor_table));
- ASSERT(l3e_get_flags(l3e[3]) & _PAGE_PRESENT);
- l2e = sh_map_domain_page(_mfn(l3e_get_pfn(l3e[3])));
- unmap_l2e = 1;
- sh_unmap_domain_page(l3e);
- }
- }
- else
- {
- /* Map the shadow table's high l2 */
- ASSERT(shadow_l3e_get_flags(shadow_l3e[3]) & _PAGE_PRESENT);
- l2e = sh_map_domain_page(shadow_l3e_get_mfn(shadow_l3e[3]));
- unmap_l2e = 1;
- }
-
- /* Write linear mapping of guest (only in PV, and only when
- * not translated). */
- if ( !shadow_mode_translate(d) )
- {
- for ( i = 0; i < SHADOW_L3_PAGETABLE_ENTRIES; i++ )
- {
- new_l2e =
- ((shadow_l3e_get_flags(guest_l3e[i]) & _PAGE_PRESENT)
- ? l2e_from_pfn(mfn_x(shadow_l3e_get_mfn(guest_l3e[i])),
- __PAGE_HYPERVISOR)
- : l2e_empty());
- safe_write_entry(
- &l2e[l2_table_offset(LINEAR_PT_VIRT_START) + i],
- &new_l2e);
- }
- }
-
- /* Write linear mapping of shadow. */
- for ( i = 0; i < SHADOW_L3_PAGETABLE_ENTRIES; i++ )
- {
- new_l2e = (shadow_l3e_get_flags(shadow_l3e[i]) & _PAGE_PRESENT)
- ? l2e_from_pfn(mfn_x(shadow_l3e_get_mfn(shadow_l3e[i])),
- __PAGE_HYPERVISOR)
- : l2e_empty();
- safe_write_entry(
- &l2e[l2_table_offset(SH_LINEAR_PT_VIRT_START) + i],
- &new_l2e);
- }
-
- if ( unmap_l2e )
- sh_unmap_domain_page(l2e);
- }
-
#else
#error this should not happen
#endif
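
In multi.c, safe_write_entry() loses its PAE branch: on a 64-bit build a shadow entry is exactly one machine word, so a single aligned store is atomic with respect to other CPUs walking the same shadow and no write-ordering trick is needed. A hypothetical standalone sketch of the surviving logic (not Xen code):

/* Hypothetical sketch: update a one-word shadow entry with one atomic store. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { unsigned long l1; } shadow_l1e_t;

static void safe_write_entry(volatile void *dst, const void *src)
{
    /* The entry must be one machine word for the store to be atomic. */
    _Static_assert(sizeof(shadow_l1e_t) == sizeof(unsigned long),
                   "shadow entry must be exactly one machine word");
    assert(((uintptr_t)dst & (sizeof(shadow_l1e_t) - 1)) == 0);

    *(volatile unsigned long *)dst = *(const unsigned long *)src;
}

int main(void)
{
    shadow_l1e_t e = { 0 }, new = { 0x80067UL };  /* arbitrary example PTE */

    safe_write_entry(&e, &new);
    printf("%#lx\n", e.l1);
    return 0;
}
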
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index b7947e51c3..a467e48ef6 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -165,11 +165,9 @@ extern void shadow_audit_tables(struct vcpu *v);
#include "multi.h"
#undef GUEST_LEVELS
-#if CONFIG_PAGING_LEVELS == 4
#define GUEST_LEVELS 4
#include "multi.h"
#undef GUEST_LEVELS
-#endif /* CONFIG_PAGING_LEVELS == 4 */
/* Shadow type codes */
#define SH_type_none (0U) /* on the shadow free list */
@@ -214,7 +212,6 @@ static inline int sh_type_is_pinnable(struct vcpu *v, unsigned int t)
* shadows so they don't just evaporate on every context switch.
* For all other guests, we'd rather use the up-pointer field in l3s. */
if ( unlikely((v->domain->arch.paging.shadow.opt_flags & SHOPT_LINUX_L3_TOPLEVEL)
- && CONFIG_PAGING_LEVELS >= 4
&& t == SH_type_l3_64_shadow) )
return 1;
#endif
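
Finally, private.h now instantiates the shadow code for GUEST_LEVELS 4 unconditionally, alongside the 2- and 3-level versions, and sh_type_is_pinnable() drops its CONFIG_PAGING_LEVELS guard. A hypothetical single-file sketch of the per-guest-level stamping that the repeated multi.h inclusion achieves via name pasting (not Xen code; the real code re-includes multi.h with GUEST_LEVELS redefined and uses SHADOW_INTERNAL_NAME()):

/* Hypothetical illustration of stamping one template per guest level. */
#include <stdio.h>

#define PASTE_(a, b) a##b
#define INTERNAL_NAME(name, levels) PASTE_(name##_, levels)

#define DEFINE_GUEST_WALKER(levels)                                       \
    static void INTERNAL_NAME(sh_demo_walk, levels)(void)                 \
    {                                                                     \
        printf("shadow code specialised for %d-level guests\n", levels);  \
    }

DEFINE_GUEST_WALKER(2)  /* 2-level (non-PAE) guests */
DEFINE_GUEST_WALKER(3)  /* PAE guests */
DEFINE_GUEST_WALKER(4)  /* 64-bit guests: no longer conditional */

int main(void)
{
    sh_demo_walk_2();
    sh_demo_walk_3();
    sh_demo_walk_4();
    return 0;
}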