aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authoriap10@tetris.cl.cam.ac.uk <iap10@tetris.cl.cam.ac.uk>2004-04-05 06:40:47 +0000
committeriap10@tetris.cl.cam.ac.uk <iap10@tetris.cl.cam.ac.uk>2004-04-05 06:40:47 +0000
commit8861f72c5bbee31fe0ae33d3eb8e9fd6e2b3f796 (patch)
treebe6d638ba75c0f18113f5a6b9a3e71136954d969
parent5b7c4eaf7289515417a1fec4fbfa29bf2798e158 (diff)
downloadxen-8861f72c5bbee31fe0ae33d3eb8e9fd6e2b3f796.tar.gz
xen-8861f72c5bbee31fe0ae33d3eb8e9fd6e2b3f796.tar.bz2
xen-8861f72c5bbee31fe0ae33d3eb8e9fd6e2b3f796.zip
bitkeeper revision 1.836.1.1 (4070ff6fJhQoaxeSlTRL6ojba0QXfw)
shadow logdirty mode added
-rw-r--r--xen/common/memory.c7
-rw-r--r--xen/include/asm-i386/page.h2
-rw-r--r--xen/include/xen/shadow.h20
3 files changed, 26 insertions, 3 deletions
diff --git a/xen/common/memory.c b/xen/common/memory.c
index b7896e3cfb..6b3b9d4e70 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1149,7 +1149,12 @@ int do_update_va_mapping(unsigned long page_nr,
}
check_pagetable( p, p->mm.pagetable, "va" ); // debug
-
+
+ /* if we're in logdirty mode, we need to note that we've updated the
+ PTE in the PT-holding page. This is a bit of a pain as we don't
+ know the physical (machine) frame number of the page */
+ if ( p->mm.shadow_mode == SHM_logdirty )
+ mark_dirty( &current->mm, va_to_l1mfn(page_nr<<PAGE_SHIFT) );
}
diff --git a/xen/include/asm-i386/page.h b/xen/include/asm-i386/page.h
index 1f8260e03c..54cbd85f11 100644
--- a/xen/include/asm-i386/page.h
+++ b/xen/include/asm-i386/page.h
@@ -93,6 +93,8 @@ typedef struct { unsigned long pt_lo; } pagetable_t;
#define linear_pg_table ((l1_pgentry_t *)LINEAR_PT_VIRT_START)
#define linear_l2_table ((l2_pgentry_t *)(LINEAR_PT_VIRT_START+(LINEAR_PT_VIRT_START>>(L2_PAGETABLE_SHIFT-L1_PAGETABLE_SHIFT))))
+#define va_to_l1mfn(_va) (l2_pgentry_val(linear_l2_table[_va>>L2_PAGETABLE_SHIFT]) >> PAGE_SHIFT)
+
extern l2_pgentry_t idle_pg_table[ENTRIES_PER_L2_PAGETABLE];
extern void paging_init(void);
diff --git a/xen/include/xen/shadow.h b/xen/include/xen/shadow.h
index f5c0d5327a..e5399d4ad0 100644
--- a/xen/include/xen/shadow.h
+++ b/xen/include/xen/shadow.h
@@ -82,8 +82,12 @@ static inline void mark_dirty( struct mm_struct *m, unsigned int mfn )
ASSERT(m->shadow_dirty_bitmap);
if( likely(pfn<m->shadow_dirty_bitmap_size) )
{
- // use setbit to be smp guest safe
- set_bit( pfn, m->shadow_dirty_bitmap );
+ /* use setbit to be smp guest safe. Since the same page is likely to
+ get marked dirty many times, examine the bit first before doing the
expensive lock-prefixed operation */
+
+ if (! test_bit( pfn, m->shadow_dirty_bitmap ) )
+ set_bit( pfn, m->shadow_dirty_bitmap );
}
else
{
@@ -328,6 +332,18 @@ static inline unsigned long get_shadow_status( struct mm_struct *m,
{
unsigned long res;
+ /* If we get here, we know that this domain is running in shadow mode.
+ We also know that some sort of update has happened to the underlying
+ page table page: either a PTE has been updated, or the page has
changed type. If we're in log dirty mode, we should set the appropriate
+ bit in the dirty bitmap.
+ NB: the VA update path doesn't use this so needs to be handled
independently.
+ */
+
+ if( m->shadow_mode == SHM_logdirty )
+ mark_dirty( m, gpfn );
+
spin_lock(&m->shadow_lock);
res = __shadow_status( m, gpfn );
if (!res) spin_unlock(&m->shadow_lock);