diff options
author:    Keir Fraser <keir.fraser@citrix.com>  2010-06-30 18:22:56 +0100
committer: Keir Fraser <keir.fraser@citrix.com>  2010-06-30 18:22:56 +0100
commit:    898eb6cca5d9d4e5212c649a97094508d4c739e3 (patch)
tree:      b739dc323c539f2159c905d16367f30b682ed4ef
parent:    88b22035a73d5bb35354244e0ff7cb595e212747 (diff)
download:  xen-898eb6cca5d9d4e5212c649a97094508d4c739e3.tar.gz
           xen-898eb6cca5d9d4e5212c649a97094508d4c739e3.tar.bz2
           xen-898eb6cca5d9d4e5212c649a97094508d4c739e3.zip
x86: Only build memory-event features on 64-bit Xen
32-bit Xen doesn't have enough p2m types to support them.
Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>
xen-unstable changeset: 21693:6b5a5bfaf357
xen-unstable date: Tue Jun 29 18:16:41 2010 +0100
 xen/arch/x86/domctl.c             |  2
 xen/arch/x86/hvm/hvm.c            |  2
 xen/arch/x86/mm.c                 |  9
 xen/arch/x86/mm/Makefile          |  6
 xen/arch/x86/mm/p2m.c             | 14
 xen/include/asm-x86/mem_sharing.h |  8
 xen/include/asm-x86/p2m.h         | 26
 7 files changed, 50 insertions(+), 17 deletions(-)
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c index 2fc24c9fd7..aa640a66b3 100644 --- a/xen/arch/x86/domctl.c +++ b/xen/arch/x86/domctl.c @@ -1418,6 +1418,7 @@ long arch_do_domctl( break; #endif /* XEN_GDBSX_CONFIG */ +#ifdef __x86_64__ case XEN_DOMCTL_mem_event_op: { struct domain *d; @@ -1448,6 +1449,7 @@ long arch_do_domctl( } } break; +#endif /* __x86_64__ */ default: ret = -ENOSYS; diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index f8311466b5..d42eefe085 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -947,6 +947,7 @@ bool_t hvm_hap_nested_page_fault(unsigned long gfn) return 1; } +#ifdef __x86_64__ /* Check if the page has been paged out */ if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) ) p2m_mem_paging_populate(current->domain, gfn); @@ -957,6 +958,7 @@ bool_t hvm_hap_nested_page_fault(unsigned long gfn) mem_sharing_unshare_page(current->domain, gfn, 0); return 1; } +#endif /* Spurious fault? PoD and log-dirty also take this path. */ if ( p2m_is_ram(p2mt) ) diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c index fb1730d508..594247a83a 100644 --- a/xen/arch/x86/mm.c +++ b/xen/arch/x86/mm.c @@ -3160,20 +3160,23 @@ int do_mmu_update( rc = -ENOENT; break; } +#ifdef __x86_64__ /* XXX: Ugly: pull all the checks into a separate function. 
* Don't want to do it now, not to interfere with mem_paging * patches */ else if ( p2m_ram_shared == l1e_p2mt ) { /* Unshare the page for RW foreign mappings */ - if(l1e_get_flags(l1e) & _PAGE_RW) + if ( l1e_get_flags(l1e) & _PAGE_RW ) { rc = mem_sharing_unshare_page(pg_owner, l1e_get_pfn(l1e), 0); - if(rc) break; + if ( rc ) + break; } } +#endif okay = mod_l1_entry(va, l1e, mfn, cmd == MMU_PT_UPDATE_PRESERVE_AD, v, @@ -4518,8 +4521,10 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg) return rc; } +#ifdef __x86_64__ case XENMEM_get_sharing_freed_pages: return mem_sharing_get_nr_saved_mfns(); +#endif default: return subarch_memory_op(op, arg); diff --git a/xen/arch/x86/mm/Makefile b/xen/arch/x86/mm/Makefile index 277c5e171a..80ddcca4aa 100644 --- a/xen/arch/x86/mm/Makefile +++ b/xen/arch/x86/mm/Makefile @@ -6,9 +6,9 @@ obj-y += p2m.o obj-y += guest_walk_2.o obj-y += guest_walk_3.o obj-$(x86_64) += guest_walk_4.o -obj-y += mem_event.o -obj-y += mem_paging.o -obj-y += mem_sharing.o +obj-$(x86_64) += mem_event.o +obj-$(x86_64) += mem_paging.o +obj-$(x86_64) += mem_sharing.o guest_walk_%.o: guest_walk.c Makefile $(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@ diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c index 768b5b8f3e..55b2d0dc76 100644 --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -1709,17 +1709,24 @@ void p2m_teardown(struct domain *d) { struct page_info *pg; struct p2m_domain *p2m = d->arch.p2m; +#ifdef __x86_64__ unsigned long gfn; p2m_type_t t; mfn_t mfn; +#endif p2m_lock(p2m); - for(gfn=0; gfn < p2m->max_mapped_pfn; gfn++) + + +#ifdef __x86_64__ + for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ ) { mfn = p2m->get_entry(d, gfn, &t, p2m_query); - if(mfn_valid(mfn) && (t == p2m_ram_shared)) + if ( mfn_valid(mfn) && (t == p2m_ram_shared) ) BUG_ON(mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN)); } +#endif + d->arch.phys_table = pagetable_null(); while ( (pg = page_list_remove_head(&p2m->pages)) ) @@ -2415,6 +2422,7 @@ 
clear_mmio_p2m_entry(struct domain *d, unsigned long gfn) return rc; } +#ifdef __x86_64__ int set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn) { @@ -2597,7 +2605,7 @@ void p2m_mem_paging_resume(struct domain *d) /* Unpause any domains that were paused because the ring was full */ mem_event_unpause_vcpus(d); } - +#endif /* __x86_64__ */ /* * Local variables: diff --git a/xen/include/asm-x86/mem_sharing.h b/xen/include/asm-x86/mem_sharing.h index 230c9def18..ae7d694cd2 100644 --- a/xen/include/asm-x86/mem_sharing.h +++ b/xen/include/asm-x86/mem_sharing.h @@ -22,6 +22,8 @@ #ifndef __MEM_SHARING_H__ #define __MEM_SHARING_H__ +#ifdef __x86_64__ + #define sharing_supported(_d) \ (is_hvm_domain(_d) && (_d)->arch.hvm_domain.hap_enabled) @@ -43,4 +45,10 @@ int mem_sharing_domctl(struct domain *d, xen_domctl_mem_sharing_op_t *mec); void mem_sharing_init(void); +#else + +#define mem_sharing_init() do { } while (0) + +#endif /* __x86_64__ */ + #endif /* __MEM_SHARING_H__ */ diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h index 0fb485661f..a95390c64b 100644 --- a/xen/include/asm-x86/p2m.h +++ b/xen/include/asm-x86/p2m.h @@ -77,11 +77,12 @@ typedef enum { p2m_grant_map_rw = 7, /* Read/write grant mapping */ p2m_grant_map_ro = 8, /* Read-only grant mapping */ + /* Likewise, although these are defined in all builds, they can only + * be used in 64-bit builds */ p2m_ram_paging_out = 9, /* Memory that is being paged out */ p2m_ram_paged = 10, /* Memory that has been paged out */ p2m_ram_paging_in = 11, /* Memory that is being paged in */ p2m_ram_paging_in_start = 12, /* Memory that is being paged in */ - p2m_ram_shared = 13, /* Shared or sharable memory */ } p2m_type_t; @@ -154,6 +155,7 @@ typedef enum { #define p2m_is_sharable(_t) (p2m_to_mask(_t) & P2M_SHARABLE_TYPES) #define p2m_is_shared(_t) (p2m_to_mask(_t) & P2M_SHARED_TYPES) + /* Populate-on-demand */ #define POPULATE_ON_DEMAND_MFN (1<<9) #define POD_PAGE_ORDER 9 @@ -314,20 +316,21 @@ 
static inline mfn_t gfn_to_mfn_unshare(struct domain *d, int must_succeed) { mfn_t mfn; - int ret; mfn = gfn_to_mfn(d, gfn, p2mt); - if(p2m_is_shared(*p2mt)) +#ifdef __x86_64__ + if ( p2m_is_shared(*p2mt) ) { - ret = mem_sharing_unshare_page(d, gfn, - must_succeed ? MEM_SHARING_MUST_SUCCEED : 0); - if(ret < 0) + if ( mem_sharing_unshare_page(d, gfn, + must_succeed + ? MEM_SHARING_MUST_SUCCEED : 0) ) { BUG_ON(must_succeed); return mfn; } mfn = gfn_to_mfn(d, gfn, p2mt); } +#endif return mfn; } @@ -429,10 +432,11 @@ p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn, /* Set mmio addresses in the p2m table (for pass-through) */ int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn); int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn); -/* Modify p2m table for shared gfn */ -int -set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn); + +#ifdef __x86_64__ +/* Modify p2m table for shared gfn */ +int set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn); /* Check if a nominated gfn is valid to be paged out */ int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn); /* Evict a frame */ @@ -443,6 +447,10 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn); int p2m_mem_paging_prep(struct domain *d, unsigned long gfn); /* Resume normal operation (in case a domain was paused) */ void p2m_mem_paging_resume(struct domain *d); +#else +static inline void p2m_mem_paging_populate(struct domain *d, unsigned long gfn) +{ } +#endif #endif /* _XEN_P2M_H */ |