author     ssmith@localhost.localdomain <ssmith@localhost.localdomain>  2006-12-11 11:16:29 -0800
committer  ssmith@localhost.localdomain <ssmith@localhost.localdomain>  2006-12-11 11:16:29 -0800
commit     7a3d8f6e4dd56771a7514624bbace26883a36da7 (patch)
tree       9d792ae7a6e4a47dc0ab56f13e7d1021360cdba1
parent     8537fbaeade8b85d8751b2702b0b3e76dabaeff6 (diff)
download   xen-7a3d8f6e4dd56771a7514624bbace26883a36da7.tar.gz
           xen-7a3d8f6e4dd56771a7514624bbace26883a36da7.tar.bz2
           xen-7a3d8f6e4dd56771a7514624bbace26883a36da7.zip
[XEN] The shadow FAST_FAULT_PATH optimisation assumes that pages never
transition between mmio and RAM-backed.  This isn't true after an
add_to_physmap memory op.  Fix this by just blowing the shadow tables
after every such operation; they're rare enough that the performance
hit is not a concern.

Signed-off-by: Steven Smith <sos22@cam.ac.uk>
Acked-by: Tim Deegan <Tim.Deegan@xensource.com>
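For context, the failure is easiest to see as a memoisation bug: the shadow
page tables effectively cache the answer to "is this gfn MMIO?", and
add_to_physmap changes that answer out from under the cache.  The following
is a minimal, self-contained C sketch of that pattern and of the fix (a full
flush on the rare transition).  All names here (blow_cache, fault_is_mmio,
and the toy add_to_physmap) are illustrative stand-ins, not the actual Xen
implementation.

    /* Sketch only: models the staleness bug this patch fixes. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define NR_GFNS 16

    static bool ram_backed[NR_GFNS];           /* the real physmap state   */

    enum cached { UNKNOWN = 0, CACHED_MMIO, CACHED_RAM };
    static enum cached shadow_cache[NR_GFNS];  /* stands in for shadow PTEs */

    /* Analogue of shadow_blow_tables(): discard every cached entry. */
    static void blow_cache(void)
    {
        memset(shadow_cache, 0, sizeof(shadow_cache));
    }

    /* Fast fault path: trust the cache once it holds an answer. */
    static bool fault_is_mmio(unsigned int gfn)
    {
        if (shadow_cache[gfn] == UNKNOWN)
            shadow_cache[gfn] = ram_backed[gfn] ? CACHED_RAM : CACHED_MMIO;
        return shadow_cache[gfn] == CACHED_MMIO;
    }

    /* Analogue of the add_to_physmap op: the gfn becomes RAM-backed.
     * Without blow_cache(), the fast path would keep returning the
     * stale "MMIO" answer for this gfn forever. */
    static void add_to_physmap(unsigned int gfn)
    {
        ram_backed[gfn] = true;
        blow_cache();   /* the fix: the op is rare, so a full flush is cheap */
    }

    int main(void)
    {
        printf("gfn 3 is mmio? %d\n", fault_is_mmio(3));  /* 1: cached MMIO */
        add_to_physmap(3);
        printf("gfn 3 is mmio? %d\n", fault_is_mmio(3));  /* 0: cache blown */
        return 0;
    }

The same trade-off drives the real patch: tracking down exactly which shadow
entries cached the stale mmio decision would be complex, whereas blowing the
tables is trivially correct and, as the in-diff comment notes, the operation
happens perhaps a dozen times during boot and then never again.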
-rw-r--r--  xen/arch/x86/mm.c                | 12
-rw-r--r--  xen/arch/x86/mm/shadow/common.c  |  2
-rw-r--r--  xen/include/asm-x86/shadow.h     |  3
3 files changed, 15 insertions, 2 deletions
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 48dea7cffb..4c26fc9969 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2968,7 +2968,17 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
guest_physmap_add_page(d, xatp.gpfn, mfn);
UNLOCK_BIGLOCK(d);
-
+
+ /* If we're doing FAST_FAULT_PATH, then shadow mode may have
+ cached the fact that this is an mmio region in the shadow
+ page tables. Blow the tables away to remove the cache.
+ This is pretty heavy handed, but this is a rare operation
+ (it might happen a dozen times during boot and then never
+ again), so it doesn't matter too much. */
+ shadow_lock(d);
+ shadow_blow_tables(d);
+ shadow_unlock(d);
+
put_domain(d);
break;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 1f295758d8..b4be85ee7e 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -733,7 +733,7 @@ void shadow_prealloc(struct domain *d, unsigned int order)
/* Deliberately free all the memory we can: this will tear down all of
* this domain's shadows */
-static void shadow_blow_tables(struct domain *d)
+void shadow_blow_tables(struct domain *d)
{
struct list_head *l, *t;
struct shadow_page_info *sp;
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 78998edea9..f923f662de 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -540,6 +540,9 @@ extern int shadow_remove_write_access(struct vcpu *v, mfn_t readonly_mfn,
* Returns non-zero if we need to flush TLBs. */
extern int shadow_remove_all_mappings(struct vcpu *v, mfn_t target_mfn);
+/* Remove all mappings from the shadows. */
+extern void shadow_blow_tables(struct domain *d);
+
void
shadow_remove_all_shadows_and_parents(struct vcpu *v, mfn_t gmfn);
/* This is an HVM page that we think is no longer a pagetable.
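Taken together, the common.c and shadow.h hunks are the standard C recipe
for promoting a file-local helper to a cross-file interface: drop the
static storage class from the definition, and publish a matching extern
prototype in the shared header so callers such as arch_memory_op() can see
it.  A condensed, single-file sketch of that pattern, with hypothetical
names rather than the Xen sources:

    #include <stdio.h>

    struct domain { int id; };

    /* shadow.h (sketch): the prototype every caller now sees. */
    extern void blow_tables(struct domain *d);

    /* common.c (sketch): definition, formerly "static void blow_tables". */
    void blow_tables(struct domain *d)
    {
        printf("blowing tables for domain %d\n", d->id);
    }

    /* mm.c (sketch): a caller from another part of the tree. */
    int main(void)
    {
        struct domain d = { .id = 7 };
        blow_tables(&d);
        return 0;
    }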