author     Juergen Gross <juergen.gross@ts.fujitsu.com>    2011-02-25 11:28:15 +0000
committer  Juergen Gross <juergen.gross@ts.fujitsu.com>    2011-02-25 11:28:15 +0000
commit     53582e3da009e5fa440cf47280bda2c1f92f2dfa (patch)
tree       38f3718ee885fc7befe7b21b9cc93df30bcd5460 /xen/common/schedule.c
parent     a0f5d3f3b7cdb44bb5d8f657efa1fa46008359ad (diff)
cpupool: Avoid race when moving cpu between cpupools
Moving cpus between cpupools is done under the schedule lock of the cpu being moved. When checking whether a cpu is a member of a cpupool, this must be done with that cpu's schedule lock held. Hot-unplugging of physical cpus might encounter the same problem, but this should happen only very rarely.

Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
Acked-by: Andre Przywara <andre.przywara@amd.com>
Acked-by: George Dunlap <george.dunlap@eu.citrix.com>
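The essence of the fix is a lock-then-recheck loop: take the old cpu's schedule lock, pick a target cpu, and if the target uses a different lock, acquire that lock with a trylock and re-verify that the target is still valid for the domain's cpupool, dropping everything and retrying on any failure so no lock-ordering deadlock can occur. Below is a minimal stand-alone C sketch of that pattern, not Xen code: cpu_lock[], pick_cpu(), cpu_is_valid() and NR_CPUS are illustrative stand-ins for the per-CPU schedule locks, the scheduler's pick_cpu hook, the cpupool's cpu_valid mask and the cpu count.

/*
 * Stand-alone sketch (not Xen code) of the locking pattern described above.
 * cpu_lock[] stands in for the per-CPU schedule locks, pick_cpu() for the
 * scheduler's pick_cpu hook, and cpu_is_valid() for the check against the
 * cpupool's cpu_valid mask; all of these names are illustrative.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static pthread_mutex_t cpu_lock[NR_CPUS];   /* per-CPU "schedule locks" */
static unsigned long cpu_valid_mask = 0xf;  /* cpus currently in the pool */

static bool cpu_is_valid(int cpu)
{
    return (cpu_valid_mask >> cpu) & 1;
}

/* Placeholder for the scheduler's pick_cpu decision. */
static int pick_cpu(int old_cpu)
{
    return (old_cpu + 1) % NR_CPUS;
}

/*
 * Take old_cpu's lock plus, if it is a different lock, new_cpu's lock.
 * A trylock followed by a full retry avoids blocking on the second lock
 * while holding the first, so no lock-ordering deadlock is possible, and
 * new_cpu's pool membership is re-checked under its own lock.
 */
static void lock_for_migrate(int old_cpu, int *new_cpu)
{
    for (;;)
    {
        pthread_mutex_lock(&cpu_lock[old_cpu]);

        *new_cpu = pick_cpu(old_cpu);
        if ( *new_cpu == old_cpu )              /* same lock: nothing more to take */
            return;

        if ( pthread_mutex_trylock(&cpu_lock[*new_cpu]) == 0 )
        {
            if ( cpu_is_valid(*new_cpu) )       /* still in the pool? keep both locks */
                return;
            pthread_mutex_unlock(&cpu_lock[*new_cpu]);
        }

        /* Contention or a stale pick: drop everything and retry. */
        pthread_mutex_unlock(&cpu_lock[old_cpu]);
    }
}

int main(void)
{
    int i, new_cpu;

    for ( i = 0; i < NR_CPUS; i++ )
        pthread_mutex_init(&cpu_lock[i], NULL);

    lock_for_migrate(0, &new_cpu);
    printf("holding schedule locks for cpu 0 and cpu %d\n", new_cpu);

    if ( new_cpu != 0 )
        pthread_mutex_unlock(&cpu_lock[new_cpu]);
    pthread_mutex_unlock(&cpu_lock[0]);
    return 0;
}

The diff below shows the same shape in vcpu_migrate(): the pick, the same_lock comparison, pcpu_schedule_trylock() and the cpu_isset() re-check all happen inside a retry loop before the migration flag is acted on.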
Diffstat (limited to 'xen/common/schedule.c')
-rw-r--r--  xen/common/schedule.c | 43
1 file changed, 36 insertions(+), 7 deletions(-)
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 5f192d272c..b6ef4769d1 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -394,8 +394,32 @@ static void vcpu_migrate(struct vcpu *v)
 {
     unsigned long flags;
     int old_cpu, new_cpu;
+    int same_lock;
 
-    vcpu_schedule_lock_irqsave(v, flags);
+    for (;;)
+    {
+        vcpu_schedule_lock_irqsave(v, flags);
+
+        /* Select new CPU. */
+        old_cpu = v->processor;
+        new_cpu = SCHED_OP(VCPU2OP(v), pick_cpu, v);
+        same_lock = (per_cpu(schedule_data, new_cpu).schedule_lock ==
+                     per_cpu(schedule_data, old_cpu).schedule_lock);
+
+        if ( same_lock )
+            break;
+
+        if ( !pcpu_schedule_trylock(new_cpu) )
+        {
+            vcpu_schedule_unlock_irqrestore(v, flags);
+            continue;
+        }
+        if ( cpu_isset(new_cpu, v->domain->cpupool->cpu_valid) )
+            break;
+
+        pcpu_schedule_unlock(new_cpu);
+        vcpu_schedule_unlock_irqrestore(v, flags);
+    }
 
     /*
      * NB. Check of v->running happens /after/ setting migration flag
@@ -405,14 +429,13 @@ static void vcpu_migrate(struct vcpu *v)
     if ( v->is_running ||
          !test_and_clear_bit(_VPF_migrating, &v->pause_flags) )
     {
+        if ( !same_lock )
+            pcpu_schedule_unlock(new_cpu);
+
         vcpu_schedule_unlock_irqrestore(v, flags);
         return;
     }
 
-    /* Select new CPU. */
-    old_cpu = v->processor;
-    new_cpu = SCHED_OP(VCPU2OP(v), pick_cpu, v);
-
     /*
      * Transfer urgency status to new CPU before switching CPUs, as once
      * the switch occurs, v->is_urgent is no longer protected by the per-CPU
@@ -424,9 +447,15 @@ static void vcpu_migrate(struct vcpu *v)
         atomic_dec(&per_cpu(schedule_data, old_cpu).urgent_count);
     }
 
-    /* Switch to new CPU, then unlock old CPU. This is safe because
-     * the lock pointer cant' change while the current lock is held. */
+    /*
+     * Switch to new CPU, then unlock new and old CPU. This is safe because
+     * the lock pointer can't change while the current lock is held.
+     */
     v->processor = new_cpu;
+
+    if ( !same_lock )
+        pcpu_schedule_unlock(new_cpu);
+
     spin_unlock_irqrestore(
         per_cpu(schedule_data, old_cpu).schedule_lock, flags);