aboutsummaryrefslogtreecommitdiffstats
path: root/xen/arch/x86/x86_64/mm.c
diff options
context:
space:
mode:
authorKeir Fraser <keir.fraser@citrix.com>2010-05-18 15:05:54 +0100
committerKeir Fraser <keir.fraser@citrix.com>2010-05-18 15:05:54 +0100
commitc12bbde846af61d1b8441da6c627181ccbc7a157 (patch)
tree7b8a9e3598e5ad236745f0eca8379530ca88f792 /xen/arch/x86/x86_64/mm.c
parent852ed0aaf07ad074ac111b1a432de5b816aad7fd (diff)
downloadxen-c12bbde846af61d1b8441da6c627181ccbc7a157.tar.gz
xen-c12bbde846af61d1b8441da6c627181ccbc7a157.tar.bz2
xen-c12bbde846af61d1b8441da6c627181ccbc7a157.zip
x86: Pull dynamic memory allocation out of do_boot_cpu().
This has two advantages: (a) We can move the allocations to a context where we can handle failure. (b) We can implement matching deallocations on CPU offline. Only the idle vcpu structure is now not freed on CPU offline. This probably does not really matter. Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Diffstat (limited to 'xen/arch/x86/x86_64/mm.c')
-rw-r--r--xen/arch/x86/x86_64/mm.c20
1 file changed, 11 insertions, 9 deletions
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 71d0a11ea5..357a80a5d7 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -810,20 +810,22 @@ void __init zap_low_mappings(void)
int __cpuinit setup_compat_arg_xlat(unsigned int cpu, int node)
{
unsigned int order = get_order_from_bytes(COMPAT_ARG_XLAT_SIZE);
- unsigned long sz = PAGE_SIZE << order;
unsigned int memflags = node != NUMA_NO_NODE ? MEMF_node(node) : 0;
struct page_info *pg;
- pg = alloc_domheap_pages(NULL, order, memflags);
- if ( !pg )
- return -ENOMEM;
-
- for ( ; (sz -= PAGE_SIZE) >= COMPAT_ARG_XLAT_SIZE; ++pg )
- free_domheap_page(pg);
+ BUG_ON((PAGE_SIZE << order) != COMPAT_ARG_XLAT_SIZE);
- per_cpu(compat_arg_xlat, cpu) = page_to_virt(pg);
+ pg = alloc_domheap_pages(NULL, order, memflags);
+ per_cpu(compat_arg_xlat, cpu) = pg ? page_to_virt(pg) : NULL;
+ return pg ? 0 : -ENOMEM;
+}
- return 0;
+void __cpuinit free_compat_arg_xlat(unsigned int cpu)
+{
+ unsigned int order = get_order_from_bytes(COMPAT_ARG_XLAT_SIZE);
+ if ( per_cpu(compat_arg_xlat, cpu) != NULL )
+ free_domheap_pages(virt_to_page(per_cpu(compat_arg_xlat, cpu)), order);
+ per_cpu(compat_arg_xlat, cpu) = NULL;
}
void cleanup_frame_table(struct mem_hotadd_info *info)