about | summary | refs | log | tree | commit | diff | stats
path: root/xen/arch/x86/percpu.c
diff options
context:
space:
mode:
author: Keir Fraser <keir.fraser@citrix.com> 2010-05-18 15:13:45 +0100
committer: Keir Fraser <keir.fraser@citrix.com> 2010-05-18 15:13:45 +0100
commit: 00c51c29c14611cdb4164fc64e89b6e7ad6c1df2 (patch)
tree: 4c4462e5d66a0bc71b12f669a3a3c9a17c682b1d /xen/arch/x86/percpu.c
parent: c12bbde846af61d1b8441da6c627181ccbc7a157 (diff)
download: xen-00c51c29c14611cdb4164fc64e89b6e7ad6c1df2.tar.gz
download: xen-00c51c29c14611cdb4164fc64e89b6e7ad6c1df2.tar.bz2
download: xen-00c51c29c14611cdb4164fc64e89b6e7ad6c1df2.zip
x86: Dynamically allocate percpu data area when a CPU comes online.
At the same time, the data area starts life zeroed. Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Diffstat (limited to 'xen/arch/x86/percpu.c')
-rw-r--r--  xen/arch/x86/percpu.c | 69
1 file changed, 69 insertions, 0 deletions
diff --git a/xen/arch/x86/percpu.c b/xen/arch/x86/percpu.c
new file mode 100644
index 0000000000..ea82647b42
--- /dev/null
+++ b/xen/arch/x86/percpu.c
@@ -0,0 +1,69 @@
+#include <xen/config.h>
+#include <xen/percpu.h>
+#include <xen/cpu.h>
+#include <xen/init.h>
+#include <xen/mm.h>
+
+/*
+ * Per-CPU offsets: added to a per-cpu symbol's link-time address to reach
+ * a given CPU's copy of the data.  CPU0's entry stays 0, so it uses the
+ * statically linked area directly.
+ */
+unsigned long __per_cpu_offset[NR_CPUS];
+/*
+ * Sentinel for "no area allocated".  Chosen so that adding it to
+ * __per_cpu_start yields 0 (i.e. any resulting per-cpu pointer is NULL),
+ * making stray accesses fault rather than corrupt memory.
+ */
+#define INVALID_PERCPU_AREA (-(long)__per_cpu_start)
+/* Page-allocation order covering the whole per-cpu data section. */
+#define PERCPU_ORDER (get_order_from_bytes(__per_cpu_data_end-__per_cpu_start))
+
+/*
+ * Boot-time initialisation: mark every secondary CPU's per-cpu area as
+ * unallocated.  CPU0 is skipped (loop starts at 1) because it uses the
+ * static per-cpu area at offset zero.
+ */
+void __init percpu_init_areas(void)
+{
+ unsigned int cpu;
+ for ( cpu = 1; cpu < NR_CPUS; cpu++ )
+ __per_cpu_offset[cpu] = INVALID_PERCPU_AREA;
+}
+
+/*
+ * Allocate and zero a fresh per-cpu data area for @cpu and record its
+ * offset from the static per-cpu section.
+ *
+ * Idempotent: returns 0 immediately if an area is already in place.
+ * Returns -ENOMEM if the xenheap allocation fails (offset left invalid).
+ */
+static int init_percpu_area(unsigned int cpu)
+{
+ char *p;
+ if ( __per_cpu_offset[cpu] != INVALID_PERCPU_AREA )
+ return 0;
+ if ( (p = alloc_xenheap_pages(PERCPU_ORDER, 0)) == NULL )
+ return -ENOMEM;
+ /* Commit message notes the area "starts life zeroed". */
+ memset(p, 0, __per_cpu_data_end - __per_cpu_start);
+ /* per_cpu(var, cpu) resolves to &var + this offset. */
+ __per_cpu_offset[cpu] = p - __per_cpu_start;
+ return 0;
+}
+
+/*
+ * Release @cpu's dynamically allocated per-cpu area and mark the slot
+ * invalid again.
+ *
+ * NOTE(review): if the offset is already INVALID_PERCPU_AREA (e.g.
+ * CPU_UP_CANCELED after a failed allocation), @p computes to NULL, so
+ * this relies on free_xenheap_pages() tolerating NULL -- confirm.
+ */
+static void free_percpu_area(unsigned int cpu)
+{
+ char *p = __per_cpu_start + __per_cpu_offset[cpu];
+ free_xenheap_pages(p, PERCPU_ORDER);
+ __per_cpu_offset[cpu] = INVALID_PERCPU_AREA;
+}
+
+/*
+ * CPU hotplug notifier: allocate the per-cpu area before a CPU is
+ * brought up, and free it again if bring-up is cancelled or the CPU
+ * dies.  @hcpu carries the CPU number cast to a pointer.
+ *
+ * Returns NOTIFY_DONE on success; otherwise an errno wrapped via
+ * notifier_from_errno() so the caller can abort CPU bring-up.
+ */
+static int cpu_percpu_callback(
+ struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ int rc = 0;
+
+ switch ( action )
+ {
+ case CPU_UP_PREPARE:
+ rc = init_percpu_area(cpu);
+ break;
+ case CPU_UP_CANCELED:
+ case CPU_DEAD:
+ /* Fallthrough of the two teardown events is intentional. */
+ free_percpu_area(cpu);
+ break;
+ default:
+ break;
+ }
+
+ return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
+}
+
+/*
+ * Registered with the highest priority so the per-cpu area exists
+ * before any other notifier's handler may touch per-cpu data.
+ */
+static struct notifier_block cpu_percpu_nfb = {
+ .notifier_call = cpu_percpu_callback,
+ .priority = 100 /* highest priority */
+};
+
+/*
+ * Register the hotplug notifier before secondary CPUs are started
+ * (pre-SMP initcall), so every AP gets its area on first bring-up.
+ */
+static int __init percpu_presmp_init(void)
+{
+ register_cpu_notifier(&cpu_percpu_nfb);
+ return 0;
+}
+presmp_initcall(percpu_presmp_init);