From fe7521484fb7c586edac84d174bec956e6c874b7 Mon Sep 17 00:00:00 2001
From: Alex Williamson
Date: Tue, 5 Feb 2008 09:23:59 -0700
Subject: [IA64] Fix domain reference counting

Fix the domain reference counting broken by allocating the shared page
and the hyperregister page from the domheap.  Calling
share_xen_page_with_guest() with a domain heap page is wrong: it
increments domain->xenpages, which is never decremented.  Thus the
domain refcount never drops to 0, so destroy_domain() is never called.
This patch makes the allocations come from the xenheap again.

The other way to fix it would be to work around domain->xenheap and the
page reference count somehow, but that would be very ugly.  The right
way is to enhance the Xen page allocator to be aware of this kind of
page in addition to xenheap and domheap pages, but we don't want to
touch the common code.  And given that the xenheap limitation on
xen/ia64 is much more relaxed, it probably isn't necessary to be so
nervous about allocating those pages from the xenheap.  If it turns out
to be necessary to allocate those pages from the domheap, we can
address it at that time.  For now, just allocate them from the xenheap.

Signed-off-by: Isaku Yamahata
---
 xen/arch/ia64/xen/domain.c | 24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)

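A minimal toy model of the leak described above (not part of the patch
itself): it assumes the usual convention that the first page shared with
a guest via share_xen_page_with_guest() takes a domain reference which
is only dropped when the per-domain count of shared xenheap pages
returns to zero.  All structures and helper names below are illustrative
stand-ins, not the real Xen code.

#include <stdio.h>

/* Toy stand-in for struct domain; only the fields relevant to the leak. */
struct domain {
    int refcnt;         /* stands in for the domain reference count      */
    int xenheap_pages;  /* stands in for domain->xenpages                */
};

/* Models share_xen_page_with_guest(): first shared page takes a ref. */
static void share_with_guest(struct domain *d)
{
    if (d->xenheap_pages++ == 0)
        d->refcnt++;
}

/* Models the xenheap free path: drops the ref once the count hits 0. */
static void free_as_xenheap(struct domain *d)
{
    if (--d->xenheap_pages == 0)
        d->refcnt--;
}

/* Models the domheap free path: no xenheap accounting at all. */
static void free_as_domheap(struct domain *d)
{
    (void)d;
}

int main(void)
{
    struct domain leaky = { .refcnt = 0, .xenheap_pages = 0 };
    struct domain fixed = { .refcnt = 0, .xenheap_pages = 0 };

    /* Old behaviour: the page came from the domheap but was shared as if
     * it were a xenheap page, so the reference taken here is never dropped. */
    share_with_guest(&leaky);
    free_as_domheap(&leaky);

    /* Patched behaviour: the page comes from the xenheap and is freed
     * through the matching path, so the reference is dropped again. */
    share_with_guest(&fixed);
    free_as_xenheap(&fixed);

    printf("domheap page shared as xenheap: refcnt=%d (never reaches 0)\n",
           leaky.refcnt);
    printf("xenheap page:                   refcnt=%d\n", fixed.refcnt);
    return 0;
}

Compiled with any C compiler, the first count stays above zero, which
mirrors why destroy_domain() was never reached before this change.
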
diff --git a/xen/arch/ia64/xen/domain.c b/xen/arch/ia64/xen/domain.c
index 0e54987a34..9e636638f3 100644
--- a/xen/arch/ia64/xen/domain.c
+++ b/xen/arch/ia64/xen/domain.c
@@ -398,7 +398,7 @@ void relinquish_vcpu_resources(struct vcpu *v)
 	if (HAS_PERVCPU_VHPT(v->domain))
 		pervcpu_vhpt_free(v);
 	if (v->arch.privregs != NULL) {
-		free_domheap_pages(virt_to_page(v->arch.privregs),
+		free_xenheap_pages(v->arch.privregs,
 				   get_order_from_shift(XMAPPEDREGS_SHIFT));
 		v->arch.privregs = NULL;
 	}
@@ -500,7 +500,6 @@ static void vcpu_share_privregs_with_guest(struct vcpu *v)
 int vcpu_late_initialise(struct vcpu *v)
 {
 	struct domain *d = v->domain;
-	struct page_info *page;
 	int rc, order;
 
 	if (HAS_PERVCPU_VHPT(d)) {
@@ -511,11 +510,9 @@ int vcpu_late_initialise(struct vcpu *v)
 
 	/* Create privregs page. */
 	order = get_order_from_shift(XMAPPEDREGS_SHIFT);
-	page = alloc_domheap_pages(NULL, order, 0);
-	if (page == NULL)
+	v->arch.privregs = alloc_xenheap_pages(order);
+	if (v->arch.privregs == NULL)
 		return -ENOMEM;
-
-	v->arch.privregs = page_to_virt(page);
 	BUG_ON(v->arch.privregs == NULL);
 	memset(v->arch.privregs, 0, 1 << XMAPPEDREGS_SHIFT);
 	vcpu_share_privregs_with_guest(v);
@@ -562,8 +559,7 @@ integer_param("pervcpu_vhpt", opt_pervcpu_vhpt);
 int arch_domain_create(struct domain *d, unsigned int domcr_flags)
 {
 	int i;
-	struct page_info *page = NULL;
-
+
 	// the following will eventually need to be negotiated dynamically
 	d->arch.shared_info_va = DEFAULT_SHAREDINFO_ADDR;
 	d->arch.breakimm = 0x1000;
@@ -582,10 +578,9 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
 #endif
 	if (tlb_track_create(d) < 0)
 		goto fail_nomem1;
-	page = alloc_domheap_pages(NULL, get_order_from_shift(XSI_SHIFT), 0);
-	if (page == NULL)
+	d->shared_info = alloc_xenheap_pages(get_order_from_shift(XSI_SHIFT));
+	if (d->shared_info == NULL)
 		goto fail_nomem;
-	d->shared_info = page_to_virt(page);
 	BUG_ON(d->shared_info == NULL);
 	memset(d->shared_info, 0, XSI_SIZE);
 	for (i = 0; i < XSI_SIZE; i += PAGE_SIZE)
@@ -628,8 +623,9 @@ fail_nomem:
 fail_nomem1:
 	if (d->arch.mm.pgd != NULL)
 		pgd_free(d->arch.mm.pgd);
-	if (page != NULL)
-		free_domheap_pages(page, get_order_from_shift(XSI_SHIFT));
+	if (d->shared_info != NULL)
+		free_xenheap_pages(d->shared_info,
+				   get_order_from_shift(XSI_SHIFT));
 	return -ENOMEM;
 }
 
@@ -638,7 +634,7 @@ void arch_domain_destroy(struct domain *d)
 	mm_final_teardown(d);
 
 	if (d->shared_info != NULL)
-		free_domheap_pages(virt_to_page(d->shared_info),
+		free_xenheap_pages(d->shared_info,
 				   get_order_from_shift(XSI_SHIFT));
 
 	tlb_track_destroy(d);
-- 
cgit v1.2.3