From 9582cc5affd7e0ea070ce57b968ff1999f3afebb Mon Sep 17 00:00:00 2001 From: "iap10@tetris.cl.cam.ac.uk" Date: Fri, 2 Apr 2004 16:27:45 +0000 Subject: bitkeeper revision 1.835 (406d9481GqoZ_RrT3GukXhamv7rulA) Delete shadow page tables when destroying domain --- xen/common/domain.c | 3 + xen/common/memory.c | 9 ++- xen/common/schedule.c | 10 +++- xen/common/shadow.c | 140 +--------------------------------------------- xen/include/xen/shadow.h | 141 +++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 161 insertions(+), 142 deletions(-) diff --git a/xen/common/domain.c b/xen/common/domain.c index 2db0331d89..7b55ee89d4 100644 --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -19,6 +19,7 @@ #include #include #include +#include <xen/shadow.h> #ifdef CONFIG_X86_64BITMODE #define ELFSIZE 64 @@ -382,6 +383,8 @@ void free_all_dom_mem(struct task_struct *p) INIT_LIST_HEAD(&zombies); + if ( p->mm.shadow_mode ) shadow_mode_disable(p); + /* STEP 1. Drop the in-use reference to the page-table base. */ put_page_and_type(&frame_table[pagetable_val(p->mm.pagetable) >> PAGE_SHIFT]); diff --git a/xen/common/memory.c b/xen/common/memory.c index edba5c02de..b7896e3cfb 100644 --- a/xen/common/memory.c +++ b/xen/common/memory.c @@ -770,7 +770,14 @@ void free_page_type(struct pfn_info *page, unsigned int type) (get_shadow_status(&current->mm, page-frame_table) & PSH_shadowed) ) { - unshadow_table( page-frame_table, type ); + /* using 'current->mm' is safe because page type changes only + occur within the context of the currently running domain as + pagetable pages can not be shared across domains. The one + exception is when destroying a domain. However, we get away + with this as there's no way the current domain can have this + mfn shadowed, so we won't get here... Phew! 
*/ + + unshadow_table( page-frame_table, type ); put_shadow_status(&current->mm); } return; diff --git a/xen/common/schedule.c b/xen/common/schedule.c index fcdf860351..6692bacaa3 100644 --- a/xen/common/schedule.c +++ b/xen/common/schedule.c @@ -283,7 +283,13 @@ long do_sched_op(unsigned long op) } -/* sched_pause_sync - synchronously pause a domain's execution */ +/* sched_pause_sync - synchronously pause a domain's execution + +XXXX This is horribly broken -- here just as a place holder at present, + do not use. + +*/ + void sched_pause_sync(struct task_struct *p) { unsigned long flags; @@ -293,7 +299,7 @@ void sched_pause_sync(struct task_struct *p) if ( schedule_data[cpu].curr != p ) /* if not the current task, we can remove it from scheduling now */ - SCHED_FN(pause, p); + SCHED_OP(pause, p); p->state = TASK_PAUSED; diff --git a/xen/common/shadow.c b/xen/common/shadow.c index 8e7b53db86..14d395cbf7 100644 --- a/xen/common/shadow.c +++ b/xen/common/shadow.c @@ -225,7 +225,7 @@ nomem: return -ENOMEM; } -static void shadow_mode_disable( struct task_struct *p ) +void shadow_mode_disable( struct task_struct *p ) { struct mm_struct *m = &p->mm; struct shadow_status *next; @@ -353,144 +353,6 @@ static inline struct pfn_info *alloc_shadow_page( struct mm_struct *m ) return alloc_domain_page( NULL ); } -/************************************************************************/ - -static inline void mark_dirty( struct mm_struct *m, unsigned int mfn ) -{ - unsigned int pfn = machine_to_phys_mapping[mfn]; - ASSERT(m->shadow_dirty_bitmap); - if( likely(pfn<m->shadow_dirty_bitmap_size) ) - { - // XXX use setbit - m->shadow_dirty_bitmap[pfn/(sizeof(int)*8)] |= - (1<<(pfn%(sizeof(int)*8))); - } - else - { - printk("XXXX mark dirty overflow!"); - } - -} - -/************************************************************************/ - -static inline void l1pte_write_fault( struct mm_struct *m, - unsigned long *gpte_p, unsigned long *spte_p ) -{ - unsigned long gpte = *gpte_p; - unsigned 
long spte = *spte_p; - - switch( m->shadow_mode ) - { - case SHM_test: - spte = gpte; - gpte |= _PAGE_DIRTY | _PAGE_ACCESSED; - spte |= _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED; - break; - - case SHM_logdirty: - spte = gpte; - gpte |= _PAGE_DIRTY | _PAGE_ACCESSED; - spte |= _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED; - mark_dirty( m, gpte >> PAGE_SHIFT ); - break; - } - - *gpte_p = gpte; - *spte_p = spte; -} - -static inline void l1pte_read_fault( struct mm_struct *m, - unsigned long *gpte_p, unsigned long *spte_p ) -{ - unsigned long gpte = *gpte_p; - unsigned long spte = *spte_p; - - switch( m->shadow_mode ) - { - case SHM_test: - spte = gpte; - gpte |= _PAGE_ACCESSED; - spte |= _PAGE_ACCESSED; - if ( ! (gpte & _PAGE_DIRTY ) ) - spte &= ~ _PAGE_RW; - break; - - case SHM_logdirty: - spte = gpte; - gpte |= _PAGE_ACCESSED; - spte |= _PAGE_ACCESSED; - spte &= ~ _PAGE_RW; - break; - } - - *gpte_p = gpte; - *spte_p = spte; -} - -static inline void l1pte_no_fault( struct mm_struct *m, - unsigned long *gpte_p, unsigned long *spte_p ) -{ - unsigned long gpte = *gpte_p; - unsigned long spte = *spte_p; - - switch( m->shadow_mode ) - { - case SHM_test: - spte = 0; - if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) == - (_PAGE_PRESENT|_PAGE_ACCESSED) ) - { - spte = gpte; - if ( ! 
(gpte & _PAGE_DIRTY ) ) + spte &= ~ _PAGE_RW; + break; + + case SHM_logdirty: + spte = gpte; + gpte |= _PAGE_ACCESSED; + spte |= _PAGE_ACCESSED; + spte &= ~ _PAGE_RW; + break; + } + + *gpte_p = gpte; + *spte_p = spte; +} + +static inline void l1pte_no_fault( struct mm_struct *m, + unsigned long *gpte_p, unsigned long *spte_p ) +{ + unsigned long gpte = *gpte_p; + unsigned long spte = *spte_p; + + switch( m->shadow_mode ) + { + case SHM_test: + spte = 0; + if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) == + (_PAGE_PRESENT|_PAGE_ACCESSED) ) + { + spte = gpte; + if ( ! (gpte & _PAGE_DIRTY ) ) + spte &= ~ _PAGE_RW; + } + break; + + case SHM_logdirty: + spte = 0; + if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) == + (_PAGE_PRESENT|_PAGE_ACCESSED) ) + { + spte = gpte; + spte &= ~ _PAGE_RW; + } + + break; + } + + *gpte_p = gpte; + *spte_p = spte; +} + +static inline void l2pde_general( struct mm_struct *m, + unsigned long *gpde_p, unsigned long *spde_p, + unsigned long sl1pfn) +{ + unsigned long gpde = *gpde_p; + unsigned long spde = *spde_p; + + spde = 0; + + if ( sl1pfn ) + { + spde = (gpde & ~PAGE_MASK) | (sl1pfn<shadow_dirty_bitmap); + if( likely(pfn<m->shadow_dirty_bitmap_size) ) + { + // use setbit to be smp guest safe + set_bit( pfn, m->shadow_dirty_bitmap ); + } + else + { + SH_LOG("mark_dirty pfn out of range attempt!"); + } + +} + +/************************************************************************/ + +static inline void l1pte_write_fault( struct mm_struct *m, + unsigned long *gpte_p, unsigned long *spte_p ) +{ + unsigned long gpte = *gpte_p; + unsigned long spte = *spte_p; + + switch( m->shadow_mode ) + { + case SHM_test: + spte = gpte; + gpte |= _PAGE_DIRTY | _PAGE_ACCESSED; + spte |= _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED; + break; + + case SHM_logdirty: + spte = gpte; + gpte |= _PAGE_DIRTY | _PAGE_ACCESSED; + spte |= _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED; + mark_dirty( m, gpte >> PAGE_SHIFT ); + break; + } + + *gpte_p = gpte; + *spte_p = spte; +} + +static inline void l1pte_read_fault( struct mm_struct *m, + unsigned long *gpte_p, unsigned long *spte_p ) +{ + unsigned long gpte = *gpte_p; + unsigned long spte = *spte_p; + + switch( m->shadow_mode ) + { + case SHM_test: + spte = gpte; + gpte |= _PAGE_ACCESSED; + spte |= _PAGE_ACCESSED; + if ( ! 
(gpte & _PAGE_DIRTY ) ) + spte &= ~ _PAGE_RW; + break; + + case SHM_logdirty: + spte = gpte; + gpte |= _PAGE_ACCESSED; + spte |= _PAGE_ACCESSED; + spte &= ~ _PAGE_RW; + break; + } + + *gpte_p = gpte; + *spte_p = spte; +} + +static inline void l1pte_no_fault( struct mm_struct *m, + unsigned long *gpte_p, unsigned long *spte_p ) +{ + unsigned long gpte = *gpte_p; + unsigned long spte = *spte_p; + + switch( m->shadow_mode ) + { + case SHM_test: + spte = 0; + if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) == + (_PAGE_PRESENT|_PAGE_ACCESSED) ) + { + spte = gpte; + if ( ! (gpte & _PAGE_DIRTY ) ) + spte &= ~ _PAGE_RW; + } + break; + + case SHM_logdirty: + spte = 0; + if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) == + (_PAGE_PRESENT|_PAGE_ACCESSED) ) + { + spte = gpte; + spte &= ~ _PAGE_RW; + } + + break; + } + + *gpte_p = gpte; + *spte_p = spte; +} + +static inline void l2pde_general( struct mm_struct *m, + unsigned long *gpde_p, unsigned long *spde_p, + unsigned long sl1pfn) +{ + unsigned long gpde = *gpde_p; + unsigned long spde = *spde_p; + + spde = 0; + + if ( sl1pfn ) + { + spde = (gpde & ~PAGE_MASK) | (sl1pfn<