aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJan Beulich <jbeulich@suse.com>2012-09-20 10:46:23 +0200
committerJan Beulich <jbeulich@suse.com>2012-09-20 10:46:23 +0200
commitffeab3a855f39d9d6477fffa2704069c0916e1de (patch)
tree565f5610f2e301ec850c2bc5ecdaa58e12822067
parent128961792e2df038c2891e0f59fd996449cb68b8 (diff)
downloadxen-ffeab3a855f39d9d6477fffa2704069c0916e1de.tar.gz
xen-ffeab3a855f39d9d6477fffa2704069c0916e1de.tar.bz2
xen-ffeab3a855f39d9d6477fffa2704069c0916e1de.zip
x86/PoD: prevent guest from being destroyed upon early access to its memory
When an external agent (e.g. a monitoring daemon) happens to access the memory of a PoD guest prior to setting the PoD target, that access must fail for there not being any page in the PoD cache, and only the space above the low 2Mb gets scanned for victim pages (while only the low 2Mb got real pages populated so far). To accommodate for this - set the PoD target first - do all physmap population in PoD mode (i.e. not just large [2Mb or 1Gb] pages) - slightly lift the restrictions enforced by p2m_pod_set_mem_target() to accommodate for the changed tools behavior Tested-by: Jürgen Groß <juergen.gross@ts.fujitsu.com> (in a 4.0.x based incarnation) Signed-off-by: Jan Beulich <jbeulich@suse.com> Acked-by: George Dunlap <george.dunlap@eu.citrix.com> xen-unstable changeset: 25754:be8ae5439a88 xen-unstable date: Thu Aug 16 08:14:11 UTC 2012
-rw-r--r--tools/libxc/xc_hvm_build.c35
-rw-r--r--xen/arch/x86/mm/p2m.c9
2 files changed, 25 insertions, 19 deletions
diff --git a/tools/libxc/xc_hvm_build.c b/tools/libxc/xc_hvm_build.c
index d619f88dd1..4af08c7691 100644
--- a/tools/libxc/xc_hvm_build.c
+++ b/tools/libxc/xc_hvm_build.c
@@ -154,7 +154,7 @@ static int setup_guest(xc_interface *xch,
goto error_out;
if ( memsize > target )
- pod_mode = 1;
+ pod_mode = XENMEMF_populate_on_demand;
memset(&elf, 0, sizeof(elf));
if ( elf_init(&elf, image, image_size) != 0 )
@@ -194,6 +194,22 @@ static int setup_guest(xc_interface *xch,
for ( i = HVM_BELOW_4G_RAM_END >> PAGE_SHIFT; i < nr_pages; i++ )
page_array[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
+ if ( pod_mode )
+ {
+ /*
+ * Subtract 0x20 from target_pages for the VGA "hole". Xen will
+ * adjust the PoD cache size so that domain tot_pages will be
+ * target_pages - 0x20 after this call.
+ */
+ rc = xc_domain_set_pod_target(xch, dom, target_pages - 0x20,
+ NULL, NULL, NULL);
+ if ( rc != 0 )
+ {
+ PERROR("Could not set PoD target for HVM guest.\n");
+ goto error_out;
+ }
+ }
+
/*
* Allocate memory for HVM guest, skipping VGA hole 0xA0000-0xC0000.
*
@@ -205,7 +221,7 @@ static int setup_guest(xc_interface *xch,
* ensure that we can be preempted and hence dom0 remains responsive.
*/
rc = xc_domain_populate_physmap_exact(
- xch, dom, 0xa0, 0, 0, &page_array[0x00]);
+ xch, dom, 0xa0, 0, pod_mode, &page_array[0x00]);
cur_pages = 0xc0;
stat_normal_pages = 0xc0;
while ( (rc == 0) && (nr_pages > cur_pages) )
@@ -243,8 +259,7 @@ static int setup_guest(xc_interface *xch,
sp_extents[i] = page_array[cur_pages+(i<<SUPERPAGE_1GB_SHIFT)];
done = xc_domain_populate_physmap(xch, dom, nr_extents, SUPERPAGE_1GB_SHIFT,
- pod_mode ? XENMEMF_populate_on_demand : 0,
- sp_extents);
+ pod_mode, sp_extents);
if ( done > 0 )
{
@@ -281,8 +296,7 @@ static int setup_guest(xc_interface *xch,
sp_extents[i] = page_array[cur_pages+(i<<SUPERPAGE_2MB_SHIFT)];
done = xc_domain_populate_physmap(xch, dom, nr_extents, SUPERPAGE_2MB_SHIFT,
- pod_mode ? XENMEMF_populate_on_demand : 0,
- sp_extents);
+ pod_mode, sp_extents);
if ( done > 0 )
{
@@ -298,19 +312,12 @@ static int setup_guest(xc_interface *xch,
if ( count != 0 )
{
rc = xc_domain_populate_physmap_exact(
- xch, dom, count, 0, 0, &page_array[cur_pages]);
+ xch, dom, count, 0, pod_mode, &page_array[cur_pages]);
cur_pages += count;
stat_normal_pages += count;
}
}
- /* Subtract 0x20 from target_pages for the VGA "hole". Xen will
- * adjust the PoD cache size so that domain tot_pages will be
- * target_pages - 0x20 after this call. */
- if ( pod_mode )
- rc = xc_domain_set_pod_target(xch, dom, target_pages - 0x20,
- NULL, NULL, NULL);
-
if ( rc != 0 )
{
PERROR("Could not allocate memory for HVM guest.");
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index eda9c8fcdd..afa225c3fb 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -611,8 +611,9 @@ p2m_pod_set_mem_target(struct domain *d, unsigned long target)
p2m_lock(p2m);
- /* P == B: Nothing to do. */
- if ( p2m->pod.entry_count == 0 )
+ /* P == B: Nothing to do (unless the guest is being created). */
+ populated = d->tot_pages - p2m->pod.count;
+ if ( populated > 0 && p2m->pod.entry_count == 0 )
goto out;
/* Don't do anything if the domain is being torn down */
@@ -624,13 +625,11 @@ p2m_pod_set_mem_target(struct domain *d, unsigned long target)
if ( target < d->tot_pages )
goto out;
- populated = d->tot_pages - p2m->pod.count;
-
pod_target = target - populated;
/* B < T': Set the cache size equal to # of outstanding entries,
* let the balloon driver fill in the rest. */
- if ( pod_target > p2m->pod.entry_count )
+ if ( populated > 0 && pod_target > p2m->pod.entry_count )
pod_target = p2m->pod.entry_count;
ASSERT( pod_target >= p2m->pod.count );