author    Liu Jinsong <jinsong.liu@intel.com>    2012-12-06 10:47:22 +0000
committer Liu Jinsong <jinsong.liu@intel.com>   2012-12-06 10:47:22 +0000
commit    b7a98e60d4516b1a2a0d5cda0773bab960d208d4 (patch)
tree      c5f495bced25bbbf7c0fba3473c8cfa6a963c884 /tools/libxc/xc_domain_restore.c
parent    d7942d1dbe5d98cef71c00c51f2826eefb9273d0 (diff)
X86/vMCE: handle broken page with regard to migration
At the sender:
xc_domain_save has a key point: querying the types of all the pages with
xc_get_pfn_type_batch.
1) If a broken page occurs before this point, migration works fine: the
proper pfn_type and pfn number are transferred to the target, which then
takes the appropriate action.
2) If a broken page occurs after this point, the whole system crashes,
and migration no longer matters.

At the target:
The target populates pages for the guest. For a broken page, we prefer to
keep the type of the page, for the sake of seamless migration. The target
sets the page's p2m type to p2m_ram_broken; if the guest accesses the
broken page again, it kills itself, as expected.

Suggested-by: George Dunlap <george.dunlap@eu.citrix.com>
Signed-off-by: Liu Jinsong <jinsong.liu@intel.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
Committed-by: Ian Campbell <ian.campbell@citrix.com>
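For context, the sender-side "key point" works roughly as sketched below.
This is a simplified illustration, not the actual xc_domain_save code:
xc_get_pfn_type_batch() is a libxc-internal helper (declared in
xc_private.h) that takes an array of raw PFNs and rewrites each entry in
place with its type encoded in the XEN_DOMCTL_PFINFO_LTAB_MASK bits; the
classify_batch() wrapper here is hypothetical.

    /* Simplified sketch of the sender-side classification described
     * above. NOT the actual xc_domain_save loop; classify_batch() is a
     * hypothetical helper for illustration. */
    #include <stdint.h>
    #include "xc_private.h"  /* xc_get_pfn_type_batch(), XEN_DOMCTL_PFINFO_* */

    static int classify_batch(xc_interface *xch, uint32_t dom,
                              xen_pfn_t *batch, unsigned int n)
    {
        unsigned int i;

        /* On input each entry is a raw PFN; on return the type is
         * encoded in the XEN_DOMCTL_PFINFO_LTAB_MASK bits of each entry. */
        if ( xc_get_pfn_type_batch(xch, dom, n, batch) )
            return -1;

        for ( i = 0; i < n; i++ )
        {
            unsigned long pagetype = batch[i] & XEN_DOMCTL_PFINFO_LTAB_MASK;

            if ( pagetype == XEN_DOMCTL_PFINFO_BROKEN )
                /* vMCE-broken page: only its type crosses the wire, no
                 * page data, just like XTAB/XALLOC pages. */
                continue;

            /* ... map and transmit the contents of normal pages ... */
        }
        return 0;
    }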
Diffstat (limited to 'tools/libxc/xc_domain_restore.c')
-rw-r--r--   tools/libxc/xc_domain_restore.c   21
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/tools/libxc/xc_domain_restore.c b/tools/libxc/xc_domain_restore.c
index 02bfa1c163..454d2cbd49 100644
--- a/tools/libxc/xc_domain_restore.c
+++ b/tools/libxc/xc_domain_restore.c
@@ -1023,9 +1023,15 @@ static int pagebuf_get_one(xc_interface *xch, struct restore_ctx *ctx,
 
     countpages = count;
     for (i = oldcount; i < buf->nr_pages; ++i)
-        if ((buf->pfn_types[i] & XEN_DOMCTL_PFINFO_LTAB_MASK) == XEN_DOMCTL_PFINFO_XTAB
-            ||(buf->pfn_types[i] & XEN_DOMCTL_PFINFO_LTAB_MASK) == XEN_DOMCTL_PFINFO_XALLOC)
+    {
+        unsigned long pagetype;
+
+        pagetype = buf->pfn_types[i] & XEN_DOMCTL_PFINFO_LTAB_MASK;
+        if ( pagetype == XEN_DOMCTL_PFINFO_XTAB ||
+             pagetype == XEN_DOMCTL_PFINFO_BROKEN ||
+             pagetype == XEN_DOMCTL_PFINFO_XALLOC )
             --countpages;
+    }
 
     if (!countpages)
         return count;
@@ -1267,6 +1273,17 @@ static int apply_batch(xc_interface *xch, uint32_t dom, struct restore_ctx *ctx,
             /* a bogus/unmapped/allocate-only page: skip it */
             continue;
 
+        if ( pagetype == XEN_DOMCTL_PFINFO_BROKEN )
+        {
+            if ( xc_set_broken_page_p2m(xch, dom, pfn) )
+            {
+                ERROR("Set p2m for broken page failed, "
+                      "dom=%d, pfn=%lx\n", dom, pfn);
+                goto err_mapped;
+            }
+            continue;
+        }
+
         if (pfn_err[i])
         {
             ERROR("unexpected PFN mapping failure pfn %lx map_mfn %lx p2m_mfn %lx",
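xc_set_broken_page_p2m(), called in the hunk above, is introduced
elsewhere in this patch series. As a rough sketch, assuming it follows
the standard libxc domctl-wrapper pattern (the domctl command name and
field layout below are assumptions taken from the companion
hypervisor-side change, not from this commit):

    /* Sketch of a domctl wrapper like xc_set_broken_page_p2m(), in the
     * usual libxc style; XEN_DOMCTL_set_broken_page_p2m and the
     * u.set_broken_page_p2m.pfn field are assumed, not shown in this
     * commit. */
    int xc_set_broken_page_p2m(xc_interface *xch, uint32_t domid,
                               unsigned long pfn)
    {
        int ret;
        DECLARE_DOMCTL;  /* libxc helper declaring 'struct xen_domctl domctl' */

        domctl.cmd = XEN_DOMCTL_set_broken_page_p2m;
        domctl.domain = (domid_t)domid;
        domctl.u.set_broken_page_p2m.pfn = pfn;
        ret = do_domctl(xch, &domctl);

        return ret ? -1 : 0;
    }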