author     Jan Beulich <jbeulich@suse.com>    2013-01-23 14:15:16 +0100
committer  Jan Beulich <jbeulich@suse.com>    2013-01-23 14:15:16 +0100
commit     adc5afbf1c70ef55c260fb93e4b8ce5ccb918706 (patch)
tree       2893064bbbb1e457fba52be6eadc8587a8497761 /xen/arch/x86/setup.c
parent     53a4e820f2888b1c7fcac6cc65c5ce854a2ff1ea (diff)
x86: support up to 16Tb
This mainly involves adjusting the number of L4 entries needing copying
between page tables (which is now different between PV and HVM/idle
domains), and changing the cutoff point and method when more than the
supported amount of memory is found in a system.

Since TMEM doesn't currently cope with the full 1:1 map not always being
visible, it gets forcefully disabled in that case.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
Acked-by: Dan Magenheimer <dan.magenheimer@oracle.com>
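For readers not steeped in Xen's memory layout, the L4-slot copying mentioned in the first paragraph can be pictured with the standalone sketch below. The slot bound and helper name are hypothetical placeholders, not Xen's real ROOT_PAGETABLE_* constants; the point is only that PV and HVM/idle page tables may now copy a different number of hypervisor slots.

/* Hedged sketch: a new top-level (L4) page table inherits the
 * hypervisor's reserved slots by copying them from a reference table.
 * XEN_FIRST_SLOT and the slot count passed in are illustrative only. */
#include <stdint.h>
#include <string.h>

#define L4_ENTRIES     512
#define XEN_FIRST_SLOT 256    /* assumed first hypervisor-owned slot */

static void init_guest_l4(uint64_t *l4, const uint64_t *idle_l4,
                          unsigned int xen_slots)   /* differs by domain type */
{
    memset(l4, 0, L4_ENTRIES * sizeof(*l4));        /* guest slots start empty */
    memcpy(l4 + XEN_FIRST_SLOT, idle_l4 + XEN_FIRST_SLOT,
           xen_slots * sizeof(*l4));                /* share Xen's mappings */
}

int main(void)
{
    static uint64_t idle_l4[L4_ENTRIES], guest_l4[L4_ENTRIES];

    init_guest_l4(guest_l4, idle_l4, 16);           /* count is illustrative */
    return 0;
}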
Diffstat (limited to 'xen/arch/x86/setup.c')
-rw-r--r--  xen/arch/x86/setup.c  53
1 file changed, 50 insertions(+), 3 deletions(-)
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index f9ed5804b2..e06ada1a70 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -25,6 +25,7 @@
 #include <xen/dmi.h>
 #include <xen/pfn.h>
 #include <xen/nodemask.h>
+#include <xen/tmem_xen.h> /* for opt_tmem only */
 #include <public/version.h>
 #include <compat/platform.h>
 #include <compat/xen.h>
@@ -381,6 +382,9 @@ static void __init setup_max_pdx(void)
     if ( max_pdx > FRAMETABLE_NR )
         max_pdx = FRAMETABLE_NR;
 
+    if ( max_pdx >= PAGE_LIST_NULL )
+        max_pdx = PAGE_LIST_NULL - 1;
+
     max_page = pdx_to_pfn(max_pdx - 1) + 1;
 }
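The clamp added above keeps max_pdx strictly below PAGE_LIST_NULL, the end-of-list sentinel stored in Xen's compressed (32-bit) page-list links, so that no valid page index can alias the sentinel. A standalone illustration of the arithmetic, using stand-in constant values rather than Xen's real ones:

#include <stdint.h>
#include <stdio.h>

#define PAGE_LIST_NULL ((uint32_t)~0)   /* assumed all-ones sentinel */
#define FRAMETABLE_NR  (1ULL << 36)     /* hypothetical frame-table capacity */

static uint64_t clamp_max_pdx(uint64_t max_pdx)
{
    if ( max_pdx > FRAMETABLE_NR )
        max_pdx = FRAMETABLE_NR;        /* bounded by the frame table */
    if ( max_pdx >= PAGE_LIST_NULL )
        max_pdx = PAGE_LIST_NULL - 1;   /* keep the sentinel unreachable */
    return max_pdx;
}

int main(void)
{
    /* 2^40 pages exceeds both limits and clamps to PAGE_LIST_NULL - 1. */
    printf("%#llx\n", (unsigned long long)clamp_max_pdx(1ULL << 40));
    return 0;
}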
@@ -1031,9 +1035,23 @@ void __init __start_xen(unsigned long mbi_p)
     /* Create new mappings /before/ passing memory to the allocator. */
     if ( map_e < e )
     {
-        map_pages_to_xen((unsigned long)__va(map_e), map_e >> PAGE_SHIFT,
-                         (e - map_e) >> PAGE_SHIFT, PAGE_HYPERVISOR);
-        init_boot_pages(map_e, e);
+        uint64_t limit = __pa(HYPERVISOR_VIRT_END - 1) + 1;
+        uint64_t end = min(e, limit);
+
+        if ( map_e < end )
+        {
+            map_pages_to_xen((unsigned long)__va(map_e), PFN_DOWN(map_e),
+                             PFN_DOWN(end - map_e), PAGE_HYPERVISOR);
+            init_boot_pages(map_e, end);
+            map_e = end;
+        }
+    }
+    if ( map_e < e )
+    {
+        /* This range must not be passed to the boot allocator and
+         * must also not be mapped with _PAGE_GLOBAL. */
+        map_pages_to_xen((unsigned long)__va(map_e), PFN_DOWN(map_e),
+                         PFN_DOWN(e - map_e), __PAGE_HYPERVISOR);
     }
     if ( s < map_s )
     {
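The rewritten block splits [map_e, e) at the physical address where the hypervisor's 1:1 mapping ends: the part below that limit keeps a PAGE_HYPERVISOR (global) mapping and is fed to the boot allocator, while the remainder is mapped without _PAGE_GLOBAL and withheld from the boot allocator. A minimal sketch of the split, with hypothetical callbacks standing in for the map_pages_to_xen()/init_boot_pages() calls:

#include <stdint.h>
#include <stdio.h>

typedef void range_fn(uint64_t start, uint64_t end);

static void handle_low(uint64_t s, uint64_t e)
{
    printf("low  [%#llx, %#llx): global mapping + boot allocator\n",
           (unsigned long long)s, (unsigned long long)e);
}

static void handle_high(uint64_t s, uint64_t e)
{
    printf("high [%#llx, %#llx): non-global mapping, no boot pages\n",
           (unsigned long long)s, (unsigned long long)e);
}

static void split_range(uint64_t map_e, uint64_t e, uint64_t limit,
                        range_fn *low, range_fn *high)
{
    uint64_t end = e < limit ? e : limit;   /* min(e, limit) */

    if ( map_e < end )
    {
        low(map_e, end);                    /* below the 1:1-map cutoff */
        map_e = end;
    }
    if ( map_e < e )
        high(map_e, e);                     /* remainder above the cutoff */
}

int main(void)
{
    /* A range straddling an 8Tb cutoff gets handled in two pieces. */
    split_range(0, 1ULL << 44, 1ULL << 43, handle_low, handle_high);
    return 0;
}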
@@ -1104,6 +1122,35 @@ void __init __start_xen(unsigned long mbi_p)
     end_boot_allocator();
     system_state = SYS_STATE_boot;
 
+    if ( max_page - 1 > virt_to_mfn(HYPERVISOR_VIRT_END - 1) )
+    {
+        unsigned long limit = virt_to_mfn(HYPERVISOR_VIRT_END - 1);
+        uint64_t mask = PAGE_SIZE - 1;
+
+        xenheap_max_mfn(limit);
+
+        /* Pass the remaining memory to the allocator. */
+        for ( i = 0; i < boot_e820.nr_map; i++ )
+        {
+            uint64_t s, e;
+
+            s = (boot_e820.map[i].addr + mask) & ~mask;
+            e = (boot_e820.map[i].addr + boot_e820.map[i].size) & ~mask;
+            if ( PFN_DOWN(e) <= limit )
+                continue;
+            if ( PFN_DOWN(s) <= limit )
+                s = pfn_to_paddr(limit + 1);
+            init_domheap_pages(s, e);
+        }
+
+        if ( opt_tmem )
+        {
+            printk(XENLOG_WARNING
+                   "TMEM physical RAM limit exceeded, disabling TMEM\n");
+            opt_tmem = 0;
+        }
+    }
+
     vm_init();
     vesa_init();
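Each E820 range in the loop above is shrunk inward to page boundaries before being handed to init_domheap_pages(): the start rounds up via (addr + mask) & ~mask and the end rounds down via & ~mask, so only whole pages are given out. A small demonstration of that rounding, assuming 4 KiB pages as on x86:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
    uint64_t mask = PAGE_SIZE - 1;
    uint64_t addr = 0x12345, size = 0x10000;   /* an unaligned E820 range */

    uint64_t s = (addr + mask) & ~mask;        /* round start up */
    uint64_t e = (addr + size) & ~mask;        /* round end down */

    /* Prints [0x12345, 0x22345) -> [0x13000, 0x22000). */
    printf("[%#llx, %#llx) -> [%#llx, %#llx)\n",
           (unsigned long long)addr, (unsigned long long)(addr + size),
           (unsigned long long)s, (unsigned long long)e);
    return 0;
}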