author    Keir Fraser <keir.fraser@citrix.com>  2010-07-08 10:03:25 +0100
committer Keir Fraser <keir.fraser@citrix.com>  2010-07-08 10:03:25 +0100
commit    e1aa7bad219e461ea1eef1f612f5f9e7f7150a82
tree      0d42d053caf81152c866386870bcef673e3f7113
parent    7a4e2b1920d3d5480cf33d8139199158979d046a
xen: make the shadow allocation hypercalls include the p2m memory
in the total shadow allocation.

This makes the effect of allocation changes consistent regardless of
p2m activity on boot.

Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>
xen-unstable changeset: 21750:92ac9536ac5a
xen-unstable date:      Thu Jul 08 09:52:51 2010 +0100
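Both hap_get_allocation() and shadow_get_allocation() report the pool size in megabytes, rounded up; after this change the p2m pages are added into the page count before that rounding. A minimal standalone sketch of the reporting calculation (not Xen code; PAGE_SHIFT of 12 and the sample page counts are assumptions for illustration):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages, as on x86; assumption for this sketch */

    static unsigned int pages_to_mb_round_up(unsigned int pg)
    {
        /* 20 - PAGE_SHIFT = 8, i.e. 256 pages per MB; any remainder rounds up */
        return (pg >> (20 - PAGE_SHIFT))
               + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0);
    }

    int main(void)
    {
        unsigned int total_pages = 1000, p2m_pages = 30;   /* illustrative values */

        /* After this patch the p2m pages are counted in the reported total. */
        printf("reported allocation: %u MB\n",
               pages_to_mb_round_up(total_pages + p2m_pages));
        return 0;
    }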
-rw-r--r--  xen/arch/x86/mm/hap/hap.c        | 13
-rw-r--r--  xen/arch/x86/mm/shadow/common.c  | 29
2 files changed, 32 insertions, 10 deletions
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 14b38a39ce..3f612d3998 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -334,7 +334,8 @@ static void hap_free_p2m_page(struct domain *d, struct page_info *pg)
 static unsigned int
 hap_get_allocation(struct domain *d)
 {
-    unsigned int pg = d->arch.paging.hap.total_pages;
+    unsigned int pg = d->arch.paging.hap.total_pages
+        + d->arch.paging.hap.p2m_pages;
 
     return ((pg >> (20 - PAGE_SHIFT))
             + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
@@ -349,6 +350,11 @@ hap_set_allocation(struct domain *d, unsigned int pages, int *preempted)
 
     ASSERT(hap_locked_by_me(d));
 
+    if ( pages < d->arch.paging.hap.p2m_pages )
+        pages = 0;
+    else
+        pages -= d->arch.paging.hap.p2m_pages;
+
     while ( d->arch.paging.hap.total_pages != pages )
     {
         if ( d->arch.paging.hap.total_pages < pages )
@@ -367,6 +373,11 @@ hap_set_allocation(struct domain *d, unsigned int pages, int *preempted)
         else if ( d->arch.paging.hap.total_pages > pages )
        {
             /* Need to return memory to domheap */
+            if ( page_list_empty(&d->arch.paging.hap.freelist) )
+            {
+                HAP_PRINTK("failed to free enough hap pages.\n");
+                return -ENOMEM;
+            }
             pg = page_list_remove_head(&d->arch.paging.hap.freelist);
             ASSERT(pg);
             d->arch.paging.hap.free_pages--;
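With the hap.c change above, the target passed to hap_set_allocation() is interpreted as including the p2m allocation, so the p2m pages are subtracted (clamping at zero, i.e. a target smaller than the p2m footprint leaves no extra pool memory) before the pool is grown or shrunk. A minimal standalone sketch of that adjustment; the helper name and values are illustrative, not Xen's actual interface:

    #include <assert.h>

    /* The caller's target includes the p2m allocation; subtract it,
     * clamping at zero, to get the size of the pool proper. */
    static unsigned int pool_target_excluding_p2m(unsigned int target_pages,
                                                  unsigned int p2m_pages)
    {
        if ( target_pages < p2m_pages )
            return 0;                        /* target is all p2m memory */
        return target_pages - p2m_pages;     /* remainder sizes the pool */
    }

    int main(void)
    {
        assert(pool_target_excluding_p2m(1024, 30) == 994);
        assert(pool_target_excluding_p2m(16, 30) == 0);
        return 0;
    }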
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index c431626bbf..35549ee2a1 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1817,14 +1817,24 @@ static unsigned int sh_set_allocation(struct domain *d,
     unsigned int j, order = shadow_max_order(d);
 
     ASSERT(shadow_locked_by_me(d));
-
-    /* Don't allocate less than the minimum acceptable, plus one page per
-     * megabyte of RAM (for the p2m table) */
-    lower_bound = shadow_min_acceptable_pages(d) + (d->tot_pages / 256);
-    if ( pages > 0 && pages < lower_bound )
-        pages = lower_bound;
-    /* Round up to largest block size */
-    pages = (pages + ((1<<SHADOW_MAX_ORDER)-1)) & ~((1<<SHADOW_MAX_ORDER)-1);
+
+    if ( pages > 0 )
+    {
+        /* Check for minimum value. */
+        if ( pages < d->arch.paging.shadow.p2m_pages )
+            pages = 0;
+        else
+            pages -= d->arch.paging.shadow.p2m_pages;
+
+        /* Don't allocate less than the minimum acceptable, plus one page per
+         * megabyte of RAM (for the p2m table) */
+        lower_bound = shadow_min_acceptable_pages(d) + (d->tot_pages / 256);
+        if ( pages < lower_bound )
+            pages = lower_bound;
+
+        /* Round up to largest block size */
+        pages = (pages + ((1<<SHADOW_MAX_ORDER)-1)) & ~((1<<SHADOW_MAX_ORDER)-1);
+    }
 
     SHADOW_PRINTK("current %i target %i\n",
                   d->arch.paging.shadow.total_pages, pages);
@@ -1884,7 +1894,8 @@ static unsigned int sh_set_allocation(struct domain *d,
 /* Return the size of the shadow pool, rounded up to the nearest MB */
 static unsigned int shadow_get_allocation(struct domain *d)
 {
-    unsigned int pg = d->arch.paging.shadow.total_pages;
+    unsigned int pg = d->arch.paging.shadow.total_pages
+        + d->arch.paging.shadow.p2m_pages;
     return ((pg >> (20 - PAGE_SHIFT))
             + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
 }
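The shadow side applies the same subtraction inside its "pages > 0" case and then, as before, enforces the lower bound and rounds the target up to a multiple of the largest shadow block, 1 << SHADOW_MAX_ORDER pages. A standalone sketch of that round-up step; the order value of 2 is an assumption for illustration, not necessarily Xen's build-time value:

    #include <assert.h>

    #define SHADOW_MAX_ORDER 2   /* illustrative; block size is 1 << order pages */

    /* Round the (p2m-adjusted) target up to a whole number of shadow blocks. */
    static unsigned int round_up_to_shadow_block(unsigned int pages)
    {
        return (pages + ((1u << SHADOW_MAX_ORDER) - 1))
               & ~((1u << SHADOW_MAX_ORDER) - 1);
    }

    int main(void)
    {
        assert(round_up_to_shadow_block(5) == 8);   /* rounds up to a 4-page block */
        assert(round_up_to_shadow_block(8) == 8);   /* already block-aligned */
        return 0;
    }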