aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
author    Keir Fraser <keir@xensource.com>  2007-11-02 10:37:59 +0000
committer Keir Fraser <keir@xensource.com>  2007-11-02 10:37:59 +0000
commit    07fb260f00b310acbbbfdd693d503d9ea800c7ad (patch)
tree      e49aefcc0aa952404012537d8599069b64446e78
parent    be13943be62fb242e9c6c5186e8e1f4e6173b4f8 (diff)
download  xen-07fb260f00b310acbbbfdd693d503d9ea800c7ad.tar.gz
          xen-07fb260f00b310acbbbfdd693d503d9ea800c7ad.tar.bz2
          xen-07fb260f00b310acbbbfdd693d503d9ea800c7ad.zip
Live migration with MMIO pages: fix shadow pte propagation.
Signed-off-by: Kieran Mansley <kmansley@solarflare.com>
-rw-r--r--  xen/arch/x86/mm/shadow/multi.c | 18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 5996ed351c..92ef0445ac 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -697,7 +697,8 @@ _sh_propagate(struct vcpu *v,
/* N.B. For pass-through MMIO, either this test needs to be relaxed,
* and shadow_set_l1e() trained to handle non-valid MFNs (ugh), or the
* MMIO areas need to be added to the frame-table to make them "valid". */
- if ( !mfn_valid(target_mfn) && (p2mt != p2m_mmio_direct) )
+ if ( shadow_mode_refcounts(d) &&
+ !mfn_valid(target_mfn) && (p2mt != p2m_mmio_direct) )
{
ASSERT((ft == ft_prefetch));
*sp = shadow_l1e_empty();
@@ -713,6 +714,8 @@ _sh_propagate(struct vcpu *v,
_PAGE_RW | _PAGE_PRESENT);
if ( guest_supports_nx(v) )
pass_thru_flags |= _PAGE_NX_BIT;
+ if ( !shadow_mode_refcounts(d) && !mfn_valid(target_mfn) )
+ pass_thru_flags |= _PAGE_PAT | _PAGE_PCD | _PAGE_PWT;
sflags = gflags & pass_thru_flags;
/* Only change memory caching type for pass-through domain */
@@ -765,10 +768,12 @@ _sh_propagate(struct vcpu *v,
// p2m_ram_logdirty p2m type: only HAP uses that.)
if ( unlikely((level == 1) && shadow_mode_log_dirty(d)) )
{
- if ( ft & FETCH_TYPE_WRITE )
- paging_mark_dirty(d, mfn_x(target_mfn));
- else if ( !sh_mfn_is_dirty(d, target_mfn) )
- sflags &= ~_PAGE_RW;
+ if ( mfn_valid(target_mfn) ) {
+ if ( ft & FETCH_TYPE_WRITE )
+ paging_mark_dirty(d, mfn_x(target_mfn));
+ else if ( !sh_mfn_is_dirty(d, target_mfn) )
+ sflags &= ~_PAGE_RW;
+ }
}
/* Read-only memory */
@@ -2843,7 +2848,8 @@ static int sh_page_fault(struct vcpu *v,
gfn = guest_l1e_get_gfn(gw.eff_l1e);
gmfn = gfn_to_mfn(d, gfn, &p2mt);
- if ( !p2m_is_valid(p2mt) || (!p2m_is_mmio(p2mt) && !mfn_valid(gmfn)) )
+ if ( shadow_mode_refcounts(d) &&
+ (!p2m_is_valid(p2mt) || (!p2m_is_mmio(p2mt) && !mfn_valid(gmfn))) )
{
perfc_incr(shadow_fault_bail_bad_gfn);
SHADOW_PRINTK("BAD gfn=%"SH_PRI_gfn" gmfn=%"PRI_mfn"\n",