author      Keir Fraser <keir@xen.org>  2012-09-12 13:29:30 +0100
committer   Keir Fraser <keir@xen.org>  2012-09-12 13:29:30 +0100
commit      5d1181a5ea5e0f11d481a94b16ed00d883f9726e (patch)
tree        4b43be5829873f2ec1a1b2d0f7e26b15dffb11c6 /xen/arch/x86/mm
parent      b314cd733413babc5978b819793ad5c77f094adf (diff)
xen: Remove x86_32 build target.
Signed-off-by: Keir Fraser <keir@xen.org>
Diffstat (limited to 'xen/arch/x86/mm')
-rw-r--r--  xen/arch/x86/mm/p2m-pt.c          9
-rw-r--r--  xen/arch/x86/mm/p2m.c            14
-rw-r--r--  xen/arch/x86/mm/shadow/Makefile   1
-rw-r--r--  xen/arch/x86/mm/shadow/common.c  74
-rw-r--r--  xen/arch/x86/mm/shadow/multi.c   38
5 files changed, 16 insertions, 120 deletions
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 5e7035f6a9..f0d1423705 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -63,21 +63,12 @@
static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn)
{
unsigned long flags;
-#ifdef __x86_64__
/*
 * AMD IOMMU: when the p2m table is shared with the IOMMU, bits 9-11 are
 * used by the IOMMU hardware to encode the next I/O page level and bits
 * 59-62 carry IOMMU flags, so these bits cannot be used to store p2m types.
 */
flags = (unsigned long)(t & 0x7f) << 12;
-#else
- flags = (t & 0x7UL) << 9;
-#endif
-
-#ifndef __x86_64__
- /* 32-bit builds don't support a lot of the p2m types */
- BUG_ON(t > p2m_populate_on_demand);
-#endif
switch(t)
{
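
With the 32-bit encoding removed, p2m_type_to_flags() always stores the type
in PTE bits 12-18, leaving bits 9-11 and 59-62 free for the AMD IOMMU. A
minimal sketch of the inverse lookup under that assumed layout (the tree's
actual p2m_flags_to_type() may differ):

    /* Sketch only: recover the p2m type from a flag word built by
     * p2m_type_to_flags() above, assuming the type sits in bits 12-18. */
    static inline p2m_type_t p2m_flags_to_type_sketch(unsigned long flags)
    {
        return (flags >> 12) & 0x7f;
    }
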
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 344fb9928d..e84549f932 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -170,7 +170,6 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
-#ifdef __x86_64__
if ( (q & P2M_UNSHARE) && p2m_is_shared(*t) )
{
ASSERT(!p2m_is_nestedp2m(p2m));
@@ -180,9 +179,7 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
(void)mem_sharing_notify_enomem(p2m->domain, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
}
-#endif
-#ifdef __x86_64__
if (unlikely((p2m_is_broken(*t))))
{
/* Return invalid_mfn to avoid caller's access */
@@ -190,7 +187,6 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
if ( q & P2M_ALLOC )
domain_crash(p2m->domain);
}
-#endif
return mfn;
}
@@ -412,18 +408,15 @@ void p2m_teardown(struct p2m_domain *p2m)
{
struct page_info *pg;
struct domain *d = p2m->domain;
-#ifdef __x86_64__
unsigned long gfn;
p2m_type_t t;
mfn_t mfn;
-#endif
if (p2m == NULL)
return;
p2m_lock(p2m);
-#ifdef __x86_64__
/* Try to unshare any remaining shared p2m entries. This is a safeguard
 * only, since relinquish_shared_pages should already have done the work. */
for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
@@ -439,7 +432,6 @@ void p2m_teardown(struct p2m_domain *p2m)
BUG_ON(mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN));
}
}
-#endif
p2m->phys_table = pagetable_null();
@@ -565,7 +557,6 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
for ( i = 0; i < (1UL << page_order); i++ )
{
omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL);
-#ifdef __x86_64__
if ( p2m_is_shared(ot) )
{
/* Do an unshare to cleanly take care of all corner
@@ -592,7 +583,6 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL);
ASSERT(!p2m_is_shared(ot));
}
-#endif /* __x86_64__ */
if ( p2m_is_grant(ot) )
{
/* Really shouldn't be unmapping grant maps this way */
@@ -840,7 +830,6 @@ set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
return rc;
}
-#ifdef __x86_64__
/**
* p2m_mem_paging_nominate - Mark a guest page as to-be-paged-out
* @d: guest domain
@@ -1430,9 +1419,6 @@ int p2m_get_mem_access(struct domain *d, unsigned long pfn,
return 0;
}
-
-#endif /* __x86_64__ */
-
static struct p2m_domain *
p2m_getlru_nestedp2m(struct domain *d, struct p2m_domain *p2m)
{
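
The unshare and broken-entry handling in __get_gfn_type_access() is now always
compiled in, so any lookup that passes P2M_UNSHARE goes through it. A minimal
caller sketch, assuming the usual get_gfn_unshare()/put_gfn() wrappers; the
helper itself is hypothetical and not part of this patch:

    /* Sketch: force an unshare before writing to a gfn.  get_gfn_unshare()
     * requests P2M_ALLOC|P2M_UNSHARE, so the path above runs on every call. */
    static int prepare_gfn_for_write_sketch(struct domain *d, unsigned long gfn)
    {
        p2m_type_t t;
        int rc = 0;

        (void)get_gfn_unshare(d, gfn, &t);
        if ( p2m_is_shared(t) )
            rc = -ENOMEM;   /* unshare failed; caller should back off and retry */
        put_gfn(d, gfn);
        return rc;
    }
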
diff --git a/xen/arch/x86/mm/shadow/Makefile b/xen/arch/x86/mm/shadow/Makefile
index 3333d43679..b3b0cde58c 100644
--- a/xen/arch/x86/mm/shadow/Makefile
+++ b/xen/arch/x86/mm/shadow/Makefile
@@ -1,4 +1,3 @@
-obj-$(x86_32) += common.o guest_2.o guest_3.o
obj-$(x86_64) += common.o guest_2.o guest_3.o guest_4.o
guest_%.o: multi.c Makefile
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index dc245befee..2039390b1f 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -276,12 +276,6 @@ hvm_emulate_cmpxchg(enum x86_segment seg,
return v->arch.paging.mode->shadow.x86_emulate_cmpxchg(
v, addr, old[0], new[0], bytes, sh_ctxt);
-#ifdef __i386__
- if ( bytes == 8 )
- return v->arch.paging.mode->shadow.x86_emulate_cmpxchg8b(
- v, addr, old[0], old[1], new[0], new[1], sh_ctxt);
-#endif
-
return X86EMUL_UNHANDLEABLE;
}
@@ -353,12 +347,6 @@ pv_emulate_cmpxchg(enum x86_segment seg,
return v->arch.paging.mode->shadow.x86_emulate_cmpxchg(
v, offset, old[0], new[0], bytes, sh_ctxt);
-#ifdef __i386__
- if ( bytes == 8 )
- return v->arch.paging.mode->shadow.x86_emulate_cmpxchg8b(
- v, offset, old[0], old[1], new[0], new[1], sh_ctxt);
-#endif
-
return X86EMUL_UNHANDLEABLE;
}
@@ -2879,29 +2867,23 @@ static void sh_update_paging_modes(struct vcpu *v)
v->arch.guest_table = d->arch.paging.shadow.unpaged_pagetable;
v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 2);
}
+ else if ( hvm_long_mode_enabled(v) )
+ {
+ // long mode guest...
+ v->arch.paging.mode =
+ &SHADOW_INTERNAL_NAME(sh_paging_mode, 4);
+ }
+ else if ( hvm_pae_enabled(v) )
+ {
+ // 32-bit PAE mode guest...
+ v->arch.paging.mode =
+ &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
+ }
else
{
-#ifdef __x86_64__
- if ( hvm_long_mode_enabled(v) )
- {
- // long mode guest...
- v->arch.paging.mode =
- &SHADOW_INTERNAL_NAME(sh_paging_mode, 4);
- }
- else
-#endif
- if ( hvm_pae_enabled(v) )
- {
- // 32-bit PAE mode guest...
- v->arch.paging.mode =
- &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
- }
- else
- {
- // 32-bit 2 level guest...
- v->arch.paging.mode =
- &SHADOW_INTERNAL_NAME(sh_paging_mode, 2);
- }
+ // 32-bit 2 level guest...
+ v->arch.paging.mode =
+ &SHADOW_INTERNAL_NAME(sh_paging_mode, 2);
}
if ( pagetable_is_null(v->arch.monitor_table) )
@@ -3664,11 +3646,6 @@ int shadow_track_dirty_vram(struct domain *d,
}
else
{
-#ifdef __i386__
- unsigned long map_mfn = INVALID_MFN;
- void *map_sl1p = NULL;
-#endif
-
/* Iterate over VRAM to track dirty bits. */
for ( i = 0; i < nr; i++ ) {
mfn_t mfn = get_gfn_query_unlocked(d, begin_pfn + i, &t);
@@ -3702,21 +3679,7 @@ int shadow_track_dirty_vram(struct domain *d,
{
/* Hopefully the most common case: only one mapping,
* whose dirty bit we can use. */
- l1_pgentry_t *sl1e;
-#ifdef __i386__
- void *sl1p = map_sl1p;
- unsigned long sl1mfn = paddr_to_pfn(sl1ma);
-
- if ( sl1mfn != map_mfn ) {
- if ( map_sl1p )
- sh_unmap_domain_page(map_sl1p);
- map_sl1p = sl1p = sh_map_domain_page(_mfn(sl1mfn));
- map_mfn = sl1mfn;
- }
- sl1e = sl1p + (sl1ma & ~PAGE_MASK);
-#else
- sl1e = maddr_to_virt(sl1ma);
-#endif
+ l1_pgentry_t *sl1e = maddr_to_virt(sl1ma);
if ( l1e_get_flags(*sl1e) & _PAGE_DIRTY )
{
@@ -3743,11 +3706,6 @@ int shadow_track_dirty_vram(struct domain *d,
}
}
-#ifdef __i386__
- if ( map_sl1p )
- sh_unmap_domain_page(map_sl1p);
-#endif
-
rc = -EFAULT;
if ( copy_to_guest(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size) == 0 ) {
memset(dirty_vram->dirty_bitmap, 0, dirty_size);
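
With only the 64-bit hypervisor left, sh_update_paging_modes() picks the shadow
mode with a flat if/else chain, and shadow_track_dirty_vram() can read shadow L1
entries directly through the direct map (maddr_to_virt()) instead of mapping and
unmapping each page. A sketch of the mode selection using the hvm_*_enabled()
predicates from the surrounding code; the helper itself is hypothetical:

    /* Sketch: guest paging state -> shadow paging levels, mirroring the
     * simplified chain in sh_update_paging_modes(). */
    static unsigned int guest_levels_sketch(struct vcpu *v)
    {
        if ( !hvm_paging_enabled(v) )
            return 2;            /* unpaged guest runs on a 2-level shadow */
        if ( hvm_long_mode_enabled(v) )
            return 4;            /* long mode guest */
        if ( hvm_pae_enabled(v) )
            return 3;            /* 32-bit PAE guest */
        return 2;                /* 32-bit 2-level guest */
    }
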
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 4f56ae6833..f23be9c392 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -5096,41 +5096,6 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, unsigned long vaddr,
return rv;
}
-#ifdef __i386__
-static int
-sh_x86_emulate_cmpxchg8b(struct vcpu *v, unsigned long vaddr,
- unsigned long old_lo, unsigned long old_hi,
- unsigned long new_lo, unsigned long new_hi,
- struct sh_emulate_ctxt *sh_ctxt)
-{
- void *addr;
- u64 old, new, prev;
- int rv = X86EMUL_OKAY;
-
- /* Unaligned writes are only acceptable on HVM */
- if ( (vaddr & 7) && !is_hvm_vcpu(v) )
- return X86EMUL_UNHANDLEABLE;
-
- addr = emulate_map_dest(v, vaddr, 8, sh_ctxt);
- if ( emulate_map_dest_failed(addr) )
- return (long)addr;
-
- old = (((u64) old_hi) << 32) | (u64) old_lo;
- new = (((u64) new_hi) << 32) | (u64) new_lo;
-
- paging_lock(v->domain);
- prev = cmpxchg(((u64 *)addr), old, new);
-
- if ( prev != old )
- rv = X86EMUL_CMPXCHG_FAILED;
-
- emulate_unmap_dest(v, addr, 8, sh_ctxt);
- shadow_audit_tables(v);
- paging_unlock(v->domain);
- return rv;
-}
-#endif
-
/**************************************************************************/
/* Audit tools */
@@ -5455,9 +5420,6 @@ const struct paging_mode sh_paging_mode = {
.shadow.detach_old_tables = sh_detach_old_tables,
.shadow.x86_emulate_write = sh_x86_emulate_write,
.shadow.x86_emulate_cmpxchg = sh_x86_emulate_cmpxchg,
-#ifdef __i386__
- .shadow.x86_emulate_cmpxchg8b = sh_x86_emulate_cmpxchg8b,
-#endif
.shadow.make_monitor_table = sh_make_monitor_table,
.shadow.destroy_monitor_table = sh_destroy_monitor_table,
#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
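
The dedicated cmpxchg8b hook existed only because a 32-bit hypervisor could not
perform an 8-byte compare-and-swap through the generic path. On x86_64 an 8-byte
operand fits the normal hook, which dispatches on the operand size roughly as in
the sketch below (an approximation, not the literal body of
sh_x86_emulate_cmpxchg()):

    /* Sketch of the size dispatch inside the generic cmpxchg emulation hook. */
    switch ( bytes )
    {
    case 1: prev = cmpxchg((u8  *)addr, old, new); break;
    case 2: prev = cmpxchg((u16 *)addr, old, new); break;
    case 4: prev = cmpxchg((u32 *)addr, old, new); break;
    case 8: prev = cmpxchg((u64 *)addr, old, new); break; /* 64-bit only */
    default: rv = X86EMUL_UNHANDLEABLE; break;
    }
    if ( rv == X86EMUL_OKAY && prev != old )
        rv = X86EMUL_CMPXCHG_FAILED;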