author     Keir Fraser <keir.fraser@citrix.com>   2007-12-06 13:39:19 +0000
committer  Keir Fraser <keir.fraser@citrix.com>   2007-12-06 13:39:19 +0000
commit     5cc77f9098763fc830db0a2b2aa53d8254305084 (patch)
tree       02b33e2487fbb318cfdef362e7ebeb68d7de534f
parent     8fd28ec7bdee9cb07c7af9bede6683e5edf02744 (diff)
32-on-64: Fix domain address-size clamping, implement copy-on-grant-transfer,
and eliminate 166GB memory limit for x86/64 Xen.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
-rw-r--r--  xen/arch/x86/domain.c         2
-rw-r--r--  xen/arch/x86/domain_build.c   2
-rw-r--r--  xen/arch/x86/e820.c           7
-rw-r--r--  xen/arch/x86/x86_64/mm.c      2
-rw-r--r--  xen/common/grant_table.c     25
-rw-r--r--  xen/common/memory.c          15
-rw-r--r--  xen/common/page_alloc.c      16
7 files changed, 37 insertions, 32 deletions
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 4da77927d5..588a7fb1f7 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -319,8 +319,6 @@ int switch_native(struct domain *d)
release_compat_l4(d->vcpu[vcpuid]);
}
- d->arch.physaddr_bitsize = 64;
-
return 0;
}
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index 7279f20075..e0fb1967e2 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -367,7 +367,7 @@ int __init construct_dom0(
#ifdef CONFIG_COMPAT
HYPERVISOR_COMPAT_VIRT_START(d) =
max_t(unsigned int, m2p_compat_vstart, value);
- d->arch.physaddr_bitsize = !is_pv_32on64_domain(d) ? 64 :
+ d->arch.physaddr_bitsize =
fls((1UL << 32) - HYPERVISOR_COMPAT_VIRT_START(d)) - 1
+ (PAGE_SHIFT - 2);
if ( value > (!is_pv_32on64_domain(d) ?
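For reference, the new physaddr_bitsize expression converts the size of the compat
machine-to-phys window into an address-bit count: each 4-byte M2P entry covers one
4 KiB page, so the reachable physical span is the window size shifted left by
PAGE_SHIFT - 2, and fls(...) - 1 takes its base-2 log. The standalone sketch below
reproduces that arithmetic with an assumed HYPERVISOR_COMPAT_VIRT_START of
0xF5800000 and a toy fls(); neither the value nor the helper is taken from the patch.

    /* Minimal sketch of the physaddr_bitsize calculation above; the compat
     * start address used here is only an illustrative assumption. */
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* fls(): 1-based index of the most significant set bit (0 for x == 0). */
    static int fls(unsigned long x)
    {
        int r = 0;
        while ( x )
        {
            x >>= 1;
            r++;
        }
        return r;
    }

    int main(void)
    {
        unsigned long compat_vstart = 0xF5800000UL;      /* assumed example */
        unsigned long m2p_bytes = (1UL << 32) - compat_vstart;
        /* Each 4-byte M2P entry covers one 4 KiB page, so the reachable
         * physical span is m2p_bytes << (PAGE_SHIFT - 2). */
        unsigned int bits = fls(m2p_bytes) - 1 + (PAGE_SHIFT - 2);

        printf("compat M2P spans %lu MiB -> physaddr_bitsize = %u "
               "(~%lu GiB reachable)\n",
               m2p_bytes >> 20, bits, 1UL << (bits - 30));
        return 0;
    }

With those assumed values the clamp works out to 37 address bits, i.e. allocations
for such a 32-on-64 domain stay below 128 GiB.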
diff --git a/xen/arch/x86/e820.c b/xen/arch/x86/e820.c
index 2fbfa87160..f09a48aed7 100644
--- a/xen/arch/x86/e820.c
+++ b/xen/arch/x86/e820.c
@@ -370,13 +370,6 @@ static void __init machine_specific_memory_setup(
"can be accessed by Xen in 32-bit mode.");
#endif
-#ifdef __x86_64__
- clip_to_limit((uint64_t)(MACH2PHYS_COMPAT_VIRT_END -
- __HYPERVISOR_COMPAT_VIRT_START) << 10,
- "Only the first %u GB of the physical memory map "
- "can be accessed by 32-on-64 guests.");
-#endif
-
reserve_dmi_region();
}
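The clip removed here is what produced the figure in the commit title:
(MACH2PHYS_COMPAT_VIRT_END - __HYPERVISOR_COMPAT_VIRT_START) << 10 multiplies the
compat M2P window by 1024, because each 4-byte M2P entry describes one 4 KiB page
(4096 / 4 = 1024). Assuming a compat M2P window of roughly 166 MiB in the default
layout of that era, the clip evaluated to roughly 166 GB of usable memory for the
whole host; removing it lets 64-bit Xen keep memory above that boundary, with
32-on-64 guests handled per-domain by the clamping and copy logic below instead.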
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 5bf706e04b..5d9490a4fe 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -442,7 +442,7 @@ int check_descriptor(const struct domain *dom, struct desc_struct *d)
unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits)
{
- if ( d == NULL )
+ if ( (d == NULL) || !is_pv_32on64_domain(d) )
return bits;
return min(d->arch.physaddr_bitsize, bits);
}
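With this change the clamp is a no-op for anything other than a PV 32-on-64 domain,
which is what lets switch_native() in the first hunk stop resetting
physaddr_bitsize to 64. A behavioural sketch with stubbed-out domain state
(fake_domain, is_pv_32on64 and the example bit widths are assumptions, not the real
struct domain layout):

    #include <stdio.h>

    struct fake_domain {
        int is_pv_32on64;              /* stand-in for is_pv_32on64_domain() */
        unsigned int physaddr_bitsize; /* stand-in for d->arch.physaddr_bitsize */
    };

    static unsigned int clamp_alloc_bitsize(const struct fake_domain *d,
                                            unsigned int bits)
    {
        if ( (d == NULL) || !d->is_pv_32on64 )
            return bits;               /* native / anonymous: no clamp */
        return d->physaddr_bitsize < bits ? d->physaddr_bitsize : bits;
    }

    int main(void)
    {
        struct fake_domain compat = { .is_pv_32on64 = 1, .physaddr_bitsize = 37 };
        struct fake_domain native = { .is_pv_32on64 = 0, .physaddr_bitsize = 37 };

        printf("compat: %u  native: %u  anonymous: %u\n",
               clamp_alloc_bitsize(&compat, 64),   /* clamped to 37 */
               clamp_alloc_bitsize(&native, 64),   /* stays 64 */
               clamp_alloc_bitsize(NULL, 64));     /* stays 64 */
        return 0;
    }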
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 5e5b353ed9..9ba4ed11ad 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1081,11 +1081,36 @@ gnttab_transfer(
if ( xsm_grant_transfer(d, e) )
{
+ unlock_and_copyback:
rcu_unlock_domain(e);
+ page->count_info &= ~(PGC_count_mask|PGC_allocated);
+ free_domheap_page(page);
gop.status = GNTST_permission_denied;
goto copyback;
}
+ if ( (1UL << domain_clamp_alloc_bitsize(e, BITS_PER_LONG-1)) <= mfn )
+ {
+ struct page_info *new_page;
+ void *sp, *dp;
+
+ new_page = alloc_domheap_pages(
+ NULL, 0,
+ MEMF_bits(domain_clamp_alloc_bitsize(e, BITS_PER_LONG-1)));
+ if ( new_page == NULL )
+ goto unlock_and_copyback;
+
+ sp = map_domain_page(mfn);
+ dp = map_domain_page(page_to_mfn(new_page));
+ memcpy(dp, sp, PAGE_SIZE);
+ unmap_domain_page(dp);
+ unmap_domain_page(sp);
+
+ page->count_info &= ~(PGC_count_mask|PGC_allocated);
+ free_domheap_page(page);
+ page = new_page;
+ }
+
spin_lock(&e->page_alloc_lock);
/*
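The block added above is the copy-on-grant-transfer named in the commit title: when
the frame being transferred lies outside what the destination domain can address, a
replacement frame is allocated within the destination's limit, the contents are
copied through temporary mappings, and the original frame is released. Below is a
hedged restatement of that flow as a helper, written purely for illustration;
duplicate_frame_below() is hypothetical and not part of the patch, and the range
check that decides whether the copy is needed stays with the caller, as in the
patch.

    /* Duplicate a frame into memory no higher than max_bits of address, then
     * release the original.  Returns the replacement page, or NULL if no
     * suitable frame could be allocated (caller bails out). */
    static struct page_info *duplicate_frame_below(unsigned int max_bits,
                                                   struct page_info *page)
    {
        struct page_info *new_page;
        void *sp, *dp;

        new_page = alloc_domheap_pages(NULL, 0, MEMF_bits(max_bits));
        if ( new_page == NULL )
            return NULL;

        /* Copy the page contents through temporary hypervisor mappings. */
        sp = map_domain_page(page_to_mfn(page));
        dp = map_domain_page(page_to_mfn(new_page));
        memcpy(dp, sp, PAGE_SIZE);
        unmap_domain_page(dp);
        unmap_domain_page(sp);

        /* Drop the original frame: clear its reference/allocation state. */
        page->count_info &= ~(PGC_count_mask|PGC_allocated);
        free_domheap_page(page);

        return new_page;
    }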
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 704497944a..90ff8a9827 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -319,18 +319,6 @@ static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
goto fail_early;
}
- if ( (exch.out.address_bits != 0) &&
- (exch.out.address_bits <
- (get_order_from_pages(max_page) + PAGE_SHIFT)) )
- {
- if ( exch.out.address_bits <= PAGE_SHIFT )
- {
- rc = -ENOMEM;
- goto fail_early;
- }
- memflags = MEMF_bits(exch.out.address_bits);
- }
-
if ( exch.in.extent_order <= exch.out.extent_order )
{
in_chunk_order = exch.out.extent_order - exch.in.extent_order;
@@ -353,6 +341,9 @@ static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
}
d = current->domain;
+ memflags |= MEMF_bits(domain_clamp_alloc_bitsize(
+ d, exch.out.address_bits ? : BITS_PER_LONG));
+
cpu = select_local_cpu(d);
for ( i = (exch.nr_exchanged >> in_chunk_order);
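memory_exchange() now derives its allocation flags by folding the guest-requested
address_bits through domain_clamp_alloc_bitsize() rather than validating it by
hand. The GNU "a ? : b" extension used here evaluates to a when a is non-zero and
to b otherwise, so an unconstrained request (address_bits == 0) is limited only by
the destination domain. A minimal illustration with assumed numbers:

    /* Sketch of the new memflags derivation; the 37-bit domain limit is an
     * assumed value for a 32-on-64 guest, not taken from the patch. */
    #include <stdio.h>

    #define BITS_PER_LONG 64

    static unsigned int clamp_bits(unsigned int domain_limit, unsigned int bits)
    {
        return domain_limit < bits ? domain_limit : bits;
    }

    int main(void)
    {
        unsigned int address_bits = 0;    /* guest did not constrain it */
        unsigned int domain_limit = 37;   /* assumed 32-on-64 clamp     */
        unsigned int bits = clamp_bits(domain_limit,
                                       address_bits ? : BITS_PER_LONG);

        printf("allocation limited to %u address bits\n", bits);  /* 37 */
        return 0;
    }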
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 65639c1f18..9de3f31ebe 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -786,15 +786,13 @@ struct page_info *__alloc_domheap_pages(
ASSERT(!in_irq());
- if ( bits )
- {
- bits = domain_clamp_alloc_bitsize(d, bits);
- if ( bits <= (PAGE_SHIFT + 1) )
- return NULL;
- bits -= PAGE_SHIFT + 1;
- if ( bits < zone_hi )
- zone_hi = bits;
- }
+ bits = domain_clamp_alloc_bitsize(d, bits ? : BITS_PER_LONG);
+ if ( bits <= (PAGE_SHIFT + 1) )
+ return NULL;
+
+ bits -= PAGE_SHIFT + 1;
+ if ( bits < zone_hi )
+ zone_hi = bits;
if ( (zone_hi + PAGE_SHIFT) >= dma_bitsize )
{
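__alloc_domheap_pages() now always clamps, treating a zero bits argument as
BITS_PER_LONG; the clamped address-bit count is then converted into a buddy-zone
bound by dropping PAGE_SHIFT + 1. A worked example with assumed values (a 37-bit
clamp and a caller-supplied zone_hi of 40):

    /* Worked example of the zone clamp above; the input values are assumed. */
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned int bits = 37;       /* assumed clamp for a 32-on-64 domain */
        unsigned int zone_hi = 40;    /* assumed caller-supplied upper zone  */

        if ( bits <= (PAGE_SHIFT + 1) )
            return 1;                 /* request cannot be satisfied */

        bits -= PAGE_SHIFT + 1;       /* 37 -> 24 */
        if ( bits < zone_hi )
            zone_hi = bits;           /* allocations stay below 2^37 bytes */

        printf("zone_hi clamped to %u\n", zone_hi);
        return 0;
    }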