diff options
author | cl349@firebug.cl.cam.ac.uk <cl349@firebug.cl.cam.ac.uk> | 2006-03-29 16:47:46 +0100 |
---|---|---|
committer | cl349@firebug.cl.cam.ac.uk <cl349@firebug.cl.cam.ac.uk> | 2006-03-29 16:47:46 +0100 |
commit | a10d8554f8deecbfd3937964c7c031592c7ac365 (patch) | |
tree | 407a6c8215095b65f9f02ddf4fefa4e1b593bb81 | |
parent | 816b45f1c4833c77ba4e15655c992b1f3de76710 (diff) | |
download | xen-a10d8554f8deecbfd3937964c7c031592c7ac365.tar.gz xen-a10d8554f8deecbfd3937964c7c031592c7ac365.tar.bz2 xen-a10d8554f8deecbfd3937964c7c031592c7ac365.zip |
Re-arrange code for followup patch and remove extra shadow_lock in function which is only called with the lock already held.
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
-rw-r--r-- | xen/include/asm-x86/shadow.h | 86 |
1 file changed, 42 insertions(+), 44 deletions(-)
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h index 5a52029840..ca988bbece 100644 --- a/xen/include/asm-x86/shadow.h +++ b/xen/include/asm-x86/shadow.h @@ -135,6 +135,8 @@ extern int set_p2m_entry( struct domain_mmap_cache *l1cache); extern void remove_shadow(struct domain *d, unsigned long gpfn, u32 stype); +extern void free_shadow_page(unsigned long smfn); + extern void shadow_l1_normal_pt_update(struct domain *d, paddr_t pa, l1_pgentry_t l1e, struct domain_mmap_cache *cache); @@ -660,55 +662,13 @@ static inline void shadow_sync_and_drop_references( if ( likely(!shadow_mode_refcounts(d)) ) return; - shadow_lock(d); - if ( page_out_of_sync(page) ) __shadow_sync_mfn(d, page_to_mfn(page)); shadow_remove_all_access(d, page_to_mfn(page)); - - shadow_unlock(d); } #endif -static inline void guest_physmap_add_page( - struct domain *d, unsigned long gpfn, unsigned long mfn) -{ - struct domain_mmap_cache c1, c2; - - if ( likely(!shadow_mode_translate(d)) ) - return; - - domain_mmap_cache_init(&c1); - domain_mmap_cache_init(&c2); - shadow_lock(d); - shadow_sync_and_drop_references(d, mfn_to_page(mfn)); - set_p2m_entry(d, gpfn, mfn, &c1, &c2); - set_gpfn_from_mfn(mfn, gpfn); - shadow_unlock(d); - domain_mmap_cache_destroy(&c1); - domain_mmap_cache_destroy(&c2); -} - -static inline void guest_physmap_remove_page( - struct domain *d, unsigned long gpfn, unsigned long mfn) -{ - struct domain_mmap_cache c1, c2; - - if ( likely(!shadow_mode_translate(d)) ) - return; - - domain_mmap_cache_init(&c1); - domain_mmap_cache_init(&c2); - shadow_lock(d); - shadow_sync_and_drop_references(d, mfn_to_page(mfn)); - set_p2m_entry(d, gpfn, -1, &c1, &c2); - set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY); - shadow_unlock(d); - domain_mmap_cache_destroy(&c1); - domain_mmap_cache_destroy(&c2); -} - /************************************************************************/ /* @@ -739,8 +699,6 @@ get_shadow_ref(unsigned long smfn) return 1; } -extern void 
free_shadow_page(unsigned long smfn); - /* * Drop a shadow reference to smfn. */ @@ -1525,6 +1483,46 @@ static inline void set_shadow_status( /************************************************************************/ +static inline void guest_physmap_add_page( + struct domain *d, unsigned long gpfn, unsigned long mfn) +{ + struct domain_mmap_cache c1, c2; + + if ( likely(!shadow_mode_translate(d)) ) + return; + + domain_mmap_cache_init(&c1); + domain_mmap_cache_init(&c2); + shadow_lock(d); + shadow_sync_and_drop_references(d, mfn_to_page(mfn)); + set_p2m_entry(d, gpfn, mfn, &c1, &c2); + set_gpfn_from_mfn(mfn, gpfn); + shadow_unlock(d); + domain_mmap_cache_destroy(&c1); + domain_mmap_cache_destroy(&c2); +} + +static inline void guest_physmap_remove_page( + struct domain *d, unsigned long gpfn, unsigned long mfn) +{ + struct domain_mmap_cache c1, c2; + + if ( likely(!shadow_mode_translate(d)) ) + return; + + domain_mmap_cache_init(&c1); + domain_mmap_cache_init(&c2); + shadow_lock(d); + shadow_sync_and_drop_references(d, mfn_to_page(mfn)); + set_p2m_entry(d, gpfn, -1, &c1, &c2); + set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY); + shadow_unlock(d); + domain_mmap_cache_destroy(&c1); + domain_mmap_cache_destroy(&c2); +} + +/************************************************************************/ + void static inline shadow_update_min_max(unsigned long smfn, int index) { |