path: root/xen/common/rcupdate.c
author     Keir Fraser <keir@xen.org>            2011-01-14 16:38:51 +0000
committer  Keir Fraser <keir@xen.org>            2011-01-14 16:38:51 +0000
commit     e7adac50b0bd072af734d245204d70c054844eec (patch)
tree       f0aba7b867ddfec203bc488aac90ee6869666308 /xen/common/rcupdate.c
parent     ecc523710a45b2301866b023435517600aec09b9 (diff)
rcupdate: Make rcu_barrier() more paranoia-proof
I'm not sure my original barrier function is correct. It may allow a CPU to exit the barrier loop, with no local work to do, while RCU work is pending on other CPUs and needing one or more quiescent periods to flush the work through. Although rcu_pending() may handle this, it is easiest to follow Linux's example and simply call_rcu() a callback function on every CPU. When the callback has executed on every CPU, we know that all previously-queued RCU work is completed, and we can exit the barrier.

Signed-off-by: Keir Fraser <keir@xen.org>
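The reasoning above maps to a simple counting pattern: each CPU queues a callback behind all of its previously queued RCU work, the callback bumps a shared counter, and the barrier spins until the counter equals the number of participating CPUs. Below is a minimal standalone sketch of that pattern using plain C11 threads and atomics; the names (fake_cpu, NR_FAKE_CPUS, barrier_callback) are invented for illustration and are not Xen APIs. The actual change, shown in the diff below, uses call_rcu(), atomic_t, and cpus_weight(cpu_online_map) under stop_machine_run().

/* Standalone sketch of the counting idea behind the new rcu_barrier().
 * Each "CPU" is modelled as a thread that drains its queued work and then
 * bumps a shared counter; the barrier spins until every participant has
 * reported in.  All names here are illustrative, not Xen identifiers. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <sched.h>

#define NR_FAKE_CPUS 4

static atomic_int cpu_count;          /* counterpart of data->cpu_count   */

/* Stand-in for rcu_barrier_callback(): runs once per "CPU". */
static void barrier_callback(void)
{
    atomic_fetch_add(&cpu_count, 1);
}

/* Stand-in for the per-CPU work done inside rcu_barrier_action(). */
static void *fake_cpu(void *arg)
{
    (void)arg;
    /* ... previously queued work would run here, in queue order ... */
    barrier_callback();               /* signals "my queue is drained"    */
    return NULL;
}

int main(void)
{
    pthread_t threads[NR_FAKE_CPUS];

    for (int i = 0; i < NR_FAKE_CPUS; i++)
        pthread_create(&threads[i], NULL, fake_cpu, NULL);

    /* The barrier itself: wait until every "CPU" has run its callback,
     * mirroring the loop over cpus_weight(cpu_online_map) in the patch. */
    while (atomic_load(&cpu_count) != NR_FAKE_CPUS)
        sched_yield();                /* plays the role of cpu_relax()    */

    printf("all %d callbacks completed, barrier may return\n", NR_FAKE_CPUS);

    for (int i = 0; i < NR_FAKE_CPUS; i++)
        pthread_join(threads[i], NULL);

    return 0;
}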
Diffstat (limited to 'xen/common/rcupdate.c')
-rw-r--r--  xen/common/rcupdate.c | 29 ++++++++++++++++++++++++-----
1 file changed, 24 insertions(+), 5 deletions(-)
diff --git a/xen/common/rcupdate.c b/xen/common/rcupdate.c
index 9087a56d9c..8f795d9a37 100644
--- a/xen/common/rcupdate.c
+++ b/xen/common/rcupdate.c
@@ -61,16 +61,34 @@ static int qhimark = 10000;
 static int qlowmark = 100;
 static int rsinterval = 1000;
 
-static int rcu_barrier_action(void *unused)
+struct rcu_barrier_data {
+    struct rcu_head head;
+    atomic_t *cpu_count;
+};
+
+static void rcu_barrier_callback(struct rcu_head *head)
+{
+    struct rcu_barrier_data *data = container_of(
+        head, struct rcu_barrier_data, head);
+    atomic_inc(data->cpu_count);
+}
+
+static int rcu_barrier_action(void *_cpu_count)
 {
-    unsigned int cpu = smp_processor_id();
+    struct rcu_barrier_data data = { .cpu_count = _cpu_count };
 
     ASSERT(!local_irq_is_enabled());
     local_irq_enable();
 
-    while ( rcu_needs_cpu(cpu) )
+    /*
+     * When callback is executed, all previously-queued RCU work on this CPU
+     * is completed. When all CPUs have executed their callback, data.cpu_count
+     * will have been incremented to include every online CPU.
+     */
+    call_rcu(&data.head, rcu_barrier_callback);
+
+    while ( atomic_read(data.cpu_count) != cpus_weight(cpu_online_map) )
     {
-        rcu_check_callbacks(cpu);
         process_pending_softirqs();
         cpu_relax();
     }
@@ -82,7 +100,8 @@ static int rcu_barrier_action(void *unused)
 
 int rcu_barrier(void)
 {
-    return stop_machine_run(rcu_barrier_action, NULL, NR_CPUS);
+    atomic_t cpu_count = ATOMIC_INIT(0);
+    return stop_machine_run(rcu_barrier_action, &cpu_count, NR_CPUS);
 }
 
 static void force_quiescent_state(struct rcu_data *rdp,