aboutsummaryrefslogtreecommitdiffstats
path: root/xen/arch/x86/percpu.c
diff options
context:
space:
mode:
authorKeir Fraser <keir@xen.org>2011-01-08 09:14:23 +0000
committerKeir Fraser <keir@xen.org>2011-01-08 09:14:23 +0000
commit1be584b57a4657d680e5551dc253ce164f06b4ac (patch)
tree48271f4d31f3bbfdb8f2d8d57f3ad04b8d64a5f3 /xen/arch/x86/percpu.c
parentc65422a6c4fdff1e47ea55e7701b53208c3bc429 (diff)
downloadxen-1be584b57a4657d680e5551dc253ce164f06b4ac.tar.gz
xen-1be584b57a4657d680e5551dc253ce164f06b4ac.tar.bz2
xen-1be584b57a4657d680e5551dc253ce164f06b4ac.zip
x86: Free per-cpu area for offline cpu via RCU.
This allows other CPUs to reference per-cpu areas with less strict locking. In particular, timer.c accesses a per-cpu lock with reference to a per-timer cpu field which it accesses with no synchronisation. One subtlety is that this prevents us bringing a cpu back online until the RCU work is completed. In this case we return EBUSY and the tool stack can report the (unlikely) error, or retry, as it sees fit. Signed-off-by: Keir Fraser <keir@xen.org>
Diffstat (limited to 'xen/arch/x86/percpu.c')
-rw-r--r--xen/arch/x86/percpu.c20
1 file changed, 18 insertions, 2 deletions
diff --git a/xen/arch/x86/percpu.c b/xen/arch/x86/percpu.c
index ea82647b42..e5450248ad 100644
--- a/xen/arch/x86/percpu.c
+++ b/xen/arch/x86/percpu.c
@@ -3,6 +3,7 @@
#include <xen/cpu.h>
#include <xen/init.h>
#include <xen/mm.h>
+#include <xen/rcupdate.h>
unsigned long __per_cpu_offset[NR_CPUS];
#define INVALID_PERCPU_AREA (-(long)__per_cpu_start)
@@ -19,7 +20,7 @@ static int init_percpu_area(unsigned int cpu)
{
char *p;
if ( __per_cpu_offset[cpu] != INVALID_PERCPU_AREA )
- return 0;
+ return -EBUSY;
if ( (p = alloc_xenheap_pages(PERCPU_ORDER, 0)) == NULL )
return -ENOMEM;
memset(p, 0, __per_cpu_data_end - __per_cpu_start);
@@ -27,13 +28,28 @@ static int init_percpu_area(unsigned int cpu)
return 0;
}
-static void free_percpu_area(unsigned int cpu)
+struct free_info {
+ unsigned int cpu;
+ struct rcu_head rcu;
+};
+static DEFINE_PER_CPU(struct free_info, free_info);
+
+static void _free_percpu_area(struct rcu_head *head)
{
+ struct free_info *info = container_of(head, struct free_info, rcu);
+ unsigned int cpu = info->cpu;
char *p = __per_cpu_start + __per_cpu_offset[cpu];
free_xenheap_pages(p, PERCPU_ORDER);
__per_cpu_offset[cpu] = INVALID_PERCPU_AREA;
}
+static void free_percpu_area(unsigned int cpu)
+{
+ struct free_info *info = &per_cpu(free_info, cpu);
+ info->cpu = cpu;
+ call_rcu(&info->rcu, _free_percpu_area);
+}
+
static int cpu_percpu_callback(
struct notifier_block *nfb, unsigned long action, void *hcpu)
{