diff options
author | Keir Fraser <keir@xen.org> | 2010-12-24 08:26:59 +0000 |
---|---|---|
committer | Keir Fraser <keir@xen.org> | 2010-12-24 08:26:59 +0000 |
commit | d2f6b6016990b570c6782e1639ca1c0b07013b59 (patch) | |
tree | 18098d7d1f3581e1500ce0bdd819083c8b5d702f /xen/common/schedule.c | |
parent | 188be59890f39c0af54367e28cd0a80b80343b55 (diff) | |
download | xen-d2f6b6016990b570c6782e1639ca1c0b07013b59.tar.gz xen-d2f6b6016990b570c6782e1639ca1c0b07013b59.tar.bz2 xen-d2f6b6016990b570c6782e1639ca1c0b07013b59.zip |
scheduler: Introduce pcpu_schedule_lock
Many places in Xen, particularly schedule.c, grab the per-cpu spinlock
directly, rather than through vcpu_schedule_lock(). Since the lock
pointer may change between the time it's read and the time the lock is
successfully acquired, we need to check after acquiring the lock to
make sure that the pcpu's lock hasn't changed, due to cpu
initialization or cpupool activity.
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Diffstat (limited to 'xen/common/schedule.c')
-rw-r--r-- | xen/common/schedule.c | 11 |
1 file changed, 6 insertions, 5 deletions
diff --git a/xen/common/schedule.c b/xen/common/schedule.c index 812b0d1a4f..69996b2f60 100644 --- a/xen/common/schedule.c +++ b/xen/common/schedule.c @@ -424,7 +424,8 @@ static void vcpu_migrate(struct vcpu *v) atomic_dec(&per_cpu(schedule_data, old_cpu).urgent_count); } - /* Switch to new CPU, then unlock old CPU. */ + /* Switch to new CPU, then unlock old CPU. This is safe because + * the lock pointer can't change while the current lock is held. */ v->processor = new_cpu; spin_unlock_irqrestore( per_cpu(schedule_data, old_cpu).schedule_lock, flags); @@ -1302,7 +1303,7 @@ void schedule_cpu_switch(unsigned int cpu, struct cpupool *c) ppriv = SCHED_OP(new_ops, alloc_pdata, cpu); vpriv = SCHED_OP(new_ops, alloc_vdata, idle, idle->domain->sched_priv); - spin_lock_irqsave(per_cpu(schedule_data, cpu).schedule_lock, flags); + pcpu_schedule_lock_irqsave(cpu, flags); SCHED_OP(old_ops, tick_suspend, cpu); vpriv_old = idle->sched_priv; @@ -1313,7 +1314,7 @@ void schedule_cpu_switch(unsigned int cpu, struct cpupool *c) SCHED_OP(new_ops, tick_resume, cpu); SCHED_OP(new_ops, insert_vcpu, idle); - spin_unlock_irqrestore(per_cpu(schedule_data, cpu).schedule_lock, flags); + pcpu_schedule_unlock_irqrestore(cpu, flags); SCHED_OP(old_ops, free_vdata, vpriv_old); SCHED_OP(old_ops, free_pdata, ppriv_old, cpu); @@ -1369,10 +1370,10 @@ void schedule_dump(struct cpupool *c) for_each_cpu_mask (i, *cpus) { - spin_lock(per_cpu(schedule_data, i).schedule_lock); + pcpu_schedule_lock(i); printk("CPU[%02d] ", i); SCHED_OP(sched, dump_cpu_state, i); - spin_unlock(per_cpu(schedule_data, i).schedule_lock); + pcpu_schedule_unlock(i); } } |