about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--xen/arch/x86/mm.c54
-rw-r--r--xen/arch/x86/mm/shadow/common.c25
-rw-r--r--xen/include/asm-x86/mm.h19
-rw-r--r--xen/include/asm-x86/shadow.h13
4 files changed, 41 insertions, 70 deletions
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index f2e6240770..8b09e26f8f 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1490,6 +1490,12 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
int alloc_page_type(struct page_info *page, unsigned long type)
{
+ struct domain *owner = page_get_owner(page);
+
+ /* A page table is dirtied when its type count becomes non-zero. */
+ if ( likely(owner != NULL) )
+ mark_dirty(owner, page_to_mfn(page));
+
switch ( type & PGT_type_mask )
{
case PGT_l1_page_table:
@@ -1528,9 +1534,11 @@ void free_page_type(struct page_info *page, unsigned long type)
*/
this_cpu(percpu_mm_info).deferred_ops |= DOP_FLUSH_ALL_TLBS;
- if ( unlikely(shadow_mode_enabled(owner)
- && !shadow_lock_is_acquired(owner)) )
+ if ( unlikely(shadow_mode_enabled(owner)) )
{
+ /* A page table is dirtied when its type count becomes zero. */
+ mark_dirty(owner, page_to_mfn(page));
+
if ( shadow_mode_refcounts(owner) )
return;
@@ -1603,19 +1611,19 @@ void put_page_type(struct page_info *page)
nx &= ~PGT_validated;
}
- /* Record TLB information for flush later. */
- page->tlbflush_timestamp = tlbflush_current_time();
+ /*
+ * Record TLB information for flush later. We do not stamp page
+ * tables when running in shadow mode:
+ * 1. Pointless, since it's the shadow pt's which must be tracked.
+ * 2. Shadow mode reuses this field for shadowed page tables to
+ * store flags info -- we don't want to conflict with that.
+ */
+ if ( !shadow_mode_enabled(page_get_owner(page)) ||
+ ((nx & PGT_type_mask) == PGT_writable_page) )
+ page->tlbflush_timestamp = tlbflush_current_time();
}
}
while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
-
- /*
- * A page table is dirtied when its type count becomes zero.
- * We cannot set the dirty flag earlier than this because we must wait
- * until the type count has been zeroed by the CMPXCHG above.
- */
- if ( unlikely((nx & (PGT_validated|PGT_count_mask)) == 0) )
- mark_dirty(page_get_owner(page), page_to_mfn(page));
}
@@ -1648,7 +1656,10 @@ int get_page_type(struct page_info *page, unsigned long type)
page_get_owner(page)->domain_dirty_cpumask;
tlbflush_filter(mask, page->tlbflush_timestamp);
- if ( unlikely(!cpus_empty(mask)) )
+ if ( unlikely(!cpus_empty(mask)) &&
+ /* Shadow mode: track only writable pages. */
+ (!shadow_mode_enabled(page_get_owner(page)) ||
+ ((nx & PGT_type_mask) == PGT_writable_page)) )
{
perfc_incrc(need_flush_tlb_flush);
flush_tlb_mask(mask);
@@ -1701,13 +1712,6 @@ int get_page_type(struct page_info *page, unsigned long type)
/* Noone else is updating simultaneously. */
__set_bit(_PGT_validated, &page->u.inuse.type_info);
-
- /*
- * A page table is dirtied when its type count becomes non-zero. It is
- * safe to mark dirty here because any PTE modifications in
- * alloc_page_type() have now happened.
- */
- mark_dirty(page_get_owner(page), page_to_mfn(page));
}
return 1;
@@ -2001,14 +2005,8 @@ int do_mmuext_op(
{
put_page_and_type(page);
put_page(page);
- if ( shadow_mode_enabled(d) )
- {
- shadow_lock(d);
- shadow_remove_all_shadows(v, _mfn(mfn));
- /* A page is dirtied when its pin status is cleared. */
- sh_mark_dirty(d, _mfn(mfn));
- shadow_unlock(d);
- }
+ /* A page is dirtied when its pin status is cleared. */
+ mark_dirty(d, mfn);
}
else
{
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index a46f7b1cb1..4ff1ebd445 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -232,32 +232,15 @@ struct x86_emulate_ops shadow_emulator_ops = {
void shadow_promote(struct vcpu *v, mfn_t gmfn, u32 type)
{
struct page_info *page = mfn_to_page(gmfn);
- unsigned long type_info;
ASSERT(valid_mfn(gmfn));
/* We should never try to promote a gmfn that has writeable mappings */
ASSERT(shadow_remove_write_access(v, gmfn, 0, 0) == 0);
- // Is the page already shadowed?
+ /* Is the page already shadowed? */
if ( !test_and_set_bit(_PGC_page_table, &page->count_info) )
- {
- // No prior shadow exists...
-
- // Grab a type-ref. We don't really care if we are racing with another
- // vcpu or not, or even what kind of type we get; we just want the type
- // count to be > 0.
- //
- do {
- type_info = page->u.inuse.type_info &
- (PGT_type_mask | PGT_pae_xen_l2);
- } while ( !get_page_type(page, type_info) );
-
- // Now that the type ref is non-zero, we can safely use the
- // shadow_flags.
- //
page->shadow_flags = 0;
- }
ASSERT(!test_bit(type >> PGC_SH_type_shift, &page->shadow_flags));
set_bit(type >> PGC_SH_type_shift, &page->shadow_flags);
@@ -273,13 +256,7 @@ void shadow_demote(struct vcpu *v, mfn_t gmfn, u32 type)
clear_bit(type >> PGC_SH_type_shift, &page->shadow_flags);
if ( (page->shadow_flags & SHF_page_type_mask) == 0 )
- {
- // release the extra type ref
- put_page_type(page);
-
- // clear the is-a-page-table bit.
clear_bit(_PGC_page_table, &page->count_info);
- }
}
/**************************************************************************/
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 2db8563924..64a6dd33ee 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -51,18 +51,19 @@ struct page_info
} u;
union {
- /* Timestamp from 'TLB clock', used to reduce need for safety
- * flushes. Only valid on a) free pages, and b) guest pages with a
- * zero type count. */
+ /*
+ * Timestamp from 'TLB clock', used to avoid extra safety flushes.
+ * Only valid for: a) free pages, and b) pages with zero type count
+ * (except page table pages when the guest is in shadow mode).
+ */
u32 tlbflush_timestamp;
- /* Only used on guest pages with a shadow.
- * Guest pages with a shadow must have a non-zero type count, so this
- * does not conflict with the tlbflush timestamp. */
+ /*
+ * Guest pages with a shadow. This does not conflict with
+ * tlbflush_timestamp since page table pages are explicitly not
+ * tracked for TLB-flush avoidance when a guest runs in shadow mode.
+ */
u32 shadow_flags;
-
- // XXX -- we expect to add another field here, to be used for min/max
- // purposes, which is only used for shadow pages.
};
};
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index b7c536477a..94a4ff11fd 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -325,24 +325,19 @@ void shadow_final_teardown(struct domain *d);
void sh_do_mark_dirty(struct domain *d, mfn_t gmfn);
static inline void mark_dirty(struct domain *d, unsigned long gmfn)
{
- int caller_locked;
-
- if ( unlikely(d == NULL) || likely(!shadow_mode_log_dirty(d)) )
+ if ( likely(!shadow_mode_log_dirty(d)) )
return;
- caller_locked = shadow_lock_is_acquired(d);
- if ( !caller_locked )
- shadow_lock(d);
+ shadow_lock(d);
sh_do_mark_dirty(d, _mfn(gmfn));
- if ( !caller_locked )
- shadow_unlock(d);
+ shadow_unlock(d);
}
/* Internal version, for when the shadow lock is already held */
static inline void sh_mark_dirty(struct domain *d, mfn_t gmfn)
{
ASSERT(shadow_lock_is_acquired(d));
- if ( shadow_mode_log_dirty(d) )
+ if ( unlikely(shadow_mode_log_dirty(d)) )
sh_do_mark_dirty(d, gmfn);
}