author    Dan Magenheimer <dan.magenheimer@oracle.com>  2013-02-25 15:19:14 -0500
committer Ian Jackson <Ian.Jackson@eu.citrix.com>       2013-04-16 16:21:50 +0100
commit    fc67e9dc0c1fe0cebbc2d77fae5aa721e7089615 (patch)
tree      c2cec935ac2fc27724fa14bb8045bd1b4ebafbb5 /tools/libxc/xc_hvm_build_x86.c
parent    749bc93f7a1ad47640cc7876d27641e98a08bf61 (diff)
xc: use XENMEM_claim_pages hypercall during guest creation.
We add an extra parameter to the structures passed to the PV routine (arch_setup_meminit) and the HVM routine (setup_guest) that determines whether the claim hypercall is to be made. The 'claim_enabled' field is defined as an 'int' in case the hypercall gains extra flags in the future (for example for per-NUMA allocation). For now the valid values are 0 to disable the claim and 1 to enable it. If the hypervisor does not support this functionality, xc_domain_claim_pages and xc_domain_get_outstanding_pages silently return 0 (and set errno to zero).

Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
[v2: Updated per Ian's recommendations]
[v3: Added support for out-of-sync hypervisor]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
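For context, a minimal sketch of querying the outstanding claim count that the message mentions; this assumes the xc_domain_get_outstanding_pages(xch) form introduced alongside this series and is not code from the patch itself:

    /* Sketch: report pages that have been claimed but not yet populated.
     * On a hypervisor without XENMEM_claim_pages support this is expected
     * to return 0 with errno left at zero, per the commit message. */
    long outstanding = xc_domain_get_outstanding_pages(xch);
    if ( outstanding < 0 )
        PERROR("Could not query outstanding claimed pages");
    else
        printf("Outstanding claimed pages: %ld\n", outstanding);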
Diffstat (limited to 'tools/libxc/xc_hvm_build_x86.c')
-rw-r--r--  tools/libxc/xc_hvm_build_x86.c | 23
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/tools/libxc/xc_hvm_build_x86.c b/tools/libxc/xc_hvm_build_x86.c
index 3b5d77715f..ab33a7fea7 100644
--- a/tools/libxc/xc_hvm_build_x86.c
+++ b/tools/libxc/xc_hvm_build_x86.c
@@ -252,6 +252,7 @@ static int setup_guest(xc_interface *xch,
unsigned long stat_normal_pages = 0, stat_2mb_pages = 0,
stat_1gb_pages = 0;
int pod_mode = 0;
+ int claim_enabled = args->claim_enabled;
if ( nr_pages > target_pages )
pod_mode = XENMEMF_populate_on_demand;
@@ -329,6 +330,16 @@ static int setup_guest(xc_interface *xch,
xch, dom, 0xa0, 0, pod_mode, &page_array[0x00]);
cur_pages = 0xc0;
stat_normal_pages = 0xc0;
+
+ /* try to claim pages for early warning of insufficient memory available */
+ if ( claim_enabled ) {
+ rc = xc_domain_claim_pages(xch, dom, nr_pages - cur_pages);
+ if ( rc != 0 )
+ {
+ PERROR("Could not allocate memory for HVM guest as we cannot claim memory!");
+ goto error_out;
+ }
+ }
while ( (rc == 0) && (nr_pages > cur_pages) )
{
/* Clip count to maximum 1GB extent. */
@@ -506,12 +517,16 @@ static int setup_guest(xc_interface *xch,
munmap(page0, PAGE_SIZE);
}
- free(page_array);
- return 0;
-
+ rc = 0;
+ goto out;
error_out:
+ rc = -1;
+ out:
+ /* ensure no unclaimed pages are left unused */
+ xc_domain_claim_pages(xch, dom, 0 /* cancels the claim */);
+
free(page_array);
- return -1;
+ return rc;
}
/* xc_hvm_build:
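
Caller-side view, as a hedged sketch: a toolstack opts in by setting the new 'claim_enabled' field on the HVM build arguments before calling xc_hvm_build(). Only 'claim_enabled' is confirmed by this patch; the other field names (mem_size, mem_target, image_file_name) and the exact xc_hvm_build() signature are assumptions about the libxc of this era:

    #include <xenctrl.h>
    #include <xenguest.h>

    /* Hypothetical caller enabling the up-front memory claim for an HVM guest. */
    int build_hvm_with_claim(xc_interface *xch, uint32_t domid, const char *image)
    {
        struct xc_hvm_build_args args = { 0 };

        args.mem_size        = 512UL << 20;   /* 512 MiB guest memory (assumed field) */
        args.mem_target      = args.mem_size; /* equal sizes: no populate-on-demand */
        args.image_file_name = image;         /* firmware image path (assumed field) */
        args.claim_enabled   = 1;             /* 1 = claim pages up front, 0 = skip */

        return xc_hvm_build(xch, domid, &args);
    }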