author    Jan Beulich <jbeulich@suse.com>    2013-01-23 14:15:16 +0100
committer Jan Beulich <jbeulich@suse.com>    2013-01-23 14:15:16 +0100
commit    adc5afbf1c70ef55c260fb93e4b8ce5ccb918706
tree      2893064bbbb1e457fba52be6eadc8587a8497761
parent    53a4e820f2888b1c7fcac6cc65c5ce854a2ff1ea
x86: support up to 16Tb
This mainly involves adjusting the number of L4 entries needing copying
between page tables (which is now different between PV and HVM/idle
domains), and changing the cutoff point and method when more than the
supported amount of memory is found in a system.

Since TMEM doesn't currently cope with the full 1:1 map not always being
visible, it gets forcefully disabled in that case.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
Acked-by: Dan Magenheimer <dan.magenheimer@oracle.com>
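The width arithmetic introduced below (xenheap_max_mfn() and the memflags
clamp in alloc_xenheap_pages()) can be exercised in isolation. The following
stand-alone C sketch mirrors that logic under stated assumptions: PAGE_SHIFT,
_MEMF_bits and fls() are simplified stand-ins for Xen's definitions, the MFN
value is hypothetical, and a 64-bit unsigned long is assumed.

    #include <stdio.h>

    #define PAGE_SHIFT  12                  /* x86 uses 4K pages */
    #define _MEMF_bits  24                  /* assumed position of the width field */
    #define MEMF_bits(n) ((n) << _MEMF_bits)

    /* Stand-in for Xen's fls(): 1-based index of the highest set bit. */
    static unsigned int fls(unsigned long x)
    {
        unsigned int r = 0;

        while ( x )
        {
            x >>= 1;
            ++r;
        }
        return r;
    }

    int main(void)
    {
        /* Hypothetical top xenheap MFN: 16TB of RAM in 4K frames = 2^32. */
        unsigned long max_mfn = 1UL << 32;

        /* What xenheap_max_mfn() records: fls(2^32) = 33, so
         * 33 + 12 - 1 = 44 address bits cover the whole xenheap. */
        unsigned int xenheap_bits = fls(max_mfn) + PAGE_SHIFT - 1;

        /* What alloc_xenheap_pages() does with a caller's width request:
         * wider than xenheap_bits is dropped, absent defaults to it.
         * (The patch writes ~0; ~0u avoids shifting a negative value here.) */
        unsigned int memflags = MEMF_bits(50);  /* wider than supported */

        if ( xenheap_bits && (memflags >> _MEMF_bits) > xenheap_bits )
            memflags &= ~MEMF_bits(~0u);
        if ( !(memflags >> _MEMF_bits) )
            memflags |= MEMF_bits(xenheap_bits);

        printf("xenheap_bits=%u, effective width=%u\n",
               xenheap_bits, memflags >> _MEMF_bits);
        return 0;
    }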
Diffstat (limited to 'xen/common/page_alloc.c')
-rw-r--r--  xen/common/page_alloc.c | 19 +++++++++++++++++--
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index e273bb7eff..9593743ef5 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -255,6 +255,9 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
     unsigned long needed = (sizeof(**_heap) +
                             sizeof(**avail) * NR_ZONES +
                             PAGE_SIZE - 1) >> PAGE_SHIFT;
+#ifdef DIRECTMAP_VIRT_END
+    unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);
+#endif
     int i, j;
 
     if ( !first_node_initialised )
@@ -266,14 +269,14 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
     }
 #ifdef DIRECTMAP_VIRT_END
     else if ( *use_tail && nr >= needed &&
-              (mfn + nr) <= (virt_to_mfn(DIRECTMAP_VIRT_END - 1) + 1) )
+              (mfn + nr) <= (virt_to_mfn(eva - 1) + 1) )
     {
         _heap[node] = mfn_to_virt(mfn + nr - needed);
         avail[node] = mfn_to_virt(mfn + nr - 1) +
                       PAGE_SIZE - sizeof(**avail) * NR_ZONES;
     }
     else if ( nr >= needed &&
-              (mfn + needed) <= (virt_to_mfn(DIRECTMAP_VIRT_END - 1) + 1) )
+              (mfn + needed) <= (virt_to_mfn(eva - 1) + 1) )
     {
         _heap[node] = mfn_to_virt(mfn);
         avail[node] = mfn_to_virt(mfn + needed - 1) +
@@ -1205,6 +1208,13 @@ void free_xenheap_pages(void *v, unsigned int order)
 
 #else
 
+static unsigned int __read_mostly xenheap_bits;
+
+void __init xenheap_max_mfn(unsigned long mfn)
+{
+    xenheap_bits = fls(mfn) + PAGE_SHIFT - 1;
+}
+
 void init_xenheap_pages(paddr_t ps, paddr_t pe)
 {
     init_domheap_pages(ps, pe);
@@ -1217,6 +1227,11 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
 
     ASSERT(!in_irq());
 
+    if ( xenheap_bits && (memflags >> _MEMF_bits) > xenheap_bits )
+        memflags &= ~MEMF_bits(~0);
+    if ( !(memflags >> _MEMF_bits) )
+        memflags |= MEMF_bits(xenheap_bits);
+
     pg = alloc_domheap_pages(NULL, order, memflags);
     if ( unlikely(pg == NULL) )
         return NULL;
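From a caller's perspective the clamp is transparent as long as the requested
width is within range; a hypothetical request (not part of this patch)
illustrates both cases:

    /* Hypothetical caller: xenheap memory addressable with 32 bits.
     * 32 <= xenheap_bits, so the width passes through unchanged; a
     * request wider than xenheap_bits would fall back to xenheap_bits,
     * and no request at all defaults to xenheap_bits as well. */
    void *p = alloc_xenheap_pages(0, MEMF_bits(32));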