author     kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2005-12-02 18:56:24 +0100
committer  kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2005-12-02 18:56:24 +0100
commit     5e4eea77eec11301284ce4810885ba74a3bd2b17 (patch)
tree       06ba504ad2c434eccb226bd48b36807838339f85
parent     773ded42218d434302b31aa4501649534c62e6d0 (diff)
Add a defensive batched TLB flush to free_page_type(), to ensure the
linear_pg_table remains in sync with the modified page-table structure.
Otherwise we can update stale entries and corrupt reference counts (but
probably only when running a malicious domain).

Signed-off-by: Keir Fraser <keir@xensource.com>
-rw-r--r--  xen/arch/x86/mm.c | 40
1 file changed, 29 insertions(+), 11 deletions(-)
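
For readers unfamiliar with Xen's deferred-op machinery, here is a minimal
standalone sketch of the batching pattern this patch extends: call sites only
set a per-CPU flag, and a single flush is performed once at hypercall exit.
Everything in it (defer(), the stub flush functions, NR_CPUS) is a
hypothetical stand-in; only the DOP_* flag semantics mirror the real code.

/*
 * Hedged sketch of deferred TLB-flush batching. Not Xen code; stub
 * functions stand in for local_flush_tlb()/flush_tlb_mask().
 */
#include <stdio.h>

#define DOP_FLUSH_TLB      (1 << 0)  /* Flush only the local TLB.               */
#define DOP_FLUSH_ALL_TLBS (1 << 1)  /* Flush TLBs of all the domain's VCPUs.   */

#define NR_CPUS 4

/* Per-CPU bitmask of operations deferred until the end of the hypercall. */
static unsigned int deferred_ops[NR_CPUS];

static void local_flush_stub(void)  { puts("flush local TLB"); }
static void global_flush_stub(void) { puts("flush TLBs on every VCPU of the domain"); }

/* Call sites merely accumulate a bit; the actual flush is batched. */
static void defer(int cpu, unsigned int op)
{
    deferred_ops[cpu] |= op;
}

/* Run once per hypercall exit: the strongest requested flush wins. */
static void process_deferred_ops_sketch(int cpu)
{
    unsigned int ops = deferred_ops[cpu];
    deferred_ops[cpu] = 0;

    if (ops & (DOP_FLUSH_ALL_TLBS | DOP_FLUSH_TLB)) {
        if (ops & DOP_FLUSH_ALL_TLBS)
            global_flush_stub();   /* covers every VCPU, not just this one */
        else
            local_flush_stub();
    }
}

int main(void)
{
    defer(0, DOP_FLUSH_ALL_TLBS);  /* e.g. the free_page_type() path      */
    defer(0, DOP_FLUSH_TLB);       /* e.g. an ordinary PTE update path    */
    process_deferred_ops_sketch(0); /* both collapse into one global flush */
    return 0;
}
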
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 31ec92fe42..b12eac5b91 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -128,8 +128,9 @@ static int mod_l1_entry(l1_pgentry_t *, l1_pgentry_t);
/* Used to defer flushing of memory structures. */
static struct {
-#define DOP_FLUSH_TLB (1<<0) /* Flush the TLB. */
-#define DOP_RELOAD_LDT (1<<1) /* Reload the LDT shadow mapping. */
+#define DOP_FLUSH_TLB (1<<0) /* Flush the local TLB. */
+#define DOP_FLUSH_ALL_TLBS (1<<1) /* Flush TLBs of all VCPUs of current dom. */
+#define DOP_RELOAD_LDT (1<<2) /* Reload the LDT shadow mapping. */
unsigned int deferred_ops;
/* If non-NULL, specifies a foreign subject domain for some operations. */
struct domain *foreign;
@@ -1323,14 +1324,28 @@ void free_page_type(struct pfn_info *page, unsigned long type)
struct domain *owner = page_get_owner(page);
unsigned long gpfn;
- if ( unlikely((owner != NULL) && shadow_mode_enabled(owner)) )
+ if ( likely(owner != NULL) )
{
- mark_dirty(owner, page_to_pfn(page));
- if ( unlikely(shadow_mode_refcounts(owner)) )
- return;
- gpfn = __mfn_to_gpfn(owner, page_to_pfn(page));
- ASSERT(VALID_M2P(gpfn));
- remove_shadow(owner, gpfn, type & PGT_type_mask);
+ /*
+ * We have to flush before the next use of the linear mapping
+ * (e.g., update_va_mapping()) or we could end up modifying a page
+ * that is no longer a page table (and hence screw up ref counts).
+ */
+ percpu_info[smp_processor_id()].deferred_ops |= DOP_FLUSH_ALL_TLBS;
+
+ if ( unlikely(shadow_mode_enabled(owner)) )
+ {
+ /* Raw page tables are rewritten during save/restore. */
+ if ( !shadow_mode_translate(owner) )
+ mark_dirty(owner, page_to_pfn(page));
+
+ if ( shadow_mode_refcounts(owner) )
+ return;
+
+ gpfn = __mfn_to_gpfn(owner, page_to_pfn(page));
+ ASSERT(VALID_M2P(gpfn));
+ remove_shadow(owner, gpfn, type & PGT_type_mask);
+ }
}
switch ( type & PGT_type_mask )
@@ -1600,11 +1615,14 @@ static void process_deferred_ops(unsigned int cpu)
deferred_ops = percpu_info[cpu].deferred_ops;
percpu_info[cpu].deferred_ops = 0;
- if ( deferred_ops & DOP_FLUSH_TLB )
+ if ( deferred_ops & (DOP_FLUSH_ALL_TLBS|DOP_FLUSH_TLB) )
{
if ( shadow_mode_enabled(d) )
shadow_sync_all(d);
- local_flush_tlb();
+ if ( deferred_ops & DOP_FLUSH_ALL_TLBS )
+ flush_tlb_mask(d->cpumask);
+ else
+ local_flush_tlb();
}
if ( deferred_ops & DOP_RELOAD_LDT )
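
A note on the design choice visible in the last hunk: DOP_FLUSH_ALL_TLBS
subsumes DOP_FLUSH_TLB. When both bits are set, the single
flush_tlb_mask(d->cpumask) already covers every VCPU of the domain, so the
cheaper local_flush_tlb() is skipped rather than performed redundantly. And
because the flush is deferred to process_deferred_ops(), several page-table
frees within one batched hypercall pay for at most one cross-CPU flush,
while still guaranteeing the flush happens before the linear mapping is next
used (e.g. by update_va_mapping()).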