author     Keir Fraser <keir@xen.org>    2010-12-24 08:26:29 +0000
committer  Keir Fraser <keir@xen.org>    2010-12-24 08:26:29 +0000
commit     188be59890f39c0af54367e28cd0a80b80343b55
tree       7de50972f4003d349656b4ecdb93ca2df01910df    /xen/include/xen/sched-if.h
parent     71ac759b144fc0344f08f6c1c335ee53c5516ce6
scheduler: Update vcpu_schedule_lock to check for changed lock pointer as well
Credit2 has different cpus share a lock, which means that as cpus are added, and as they are moved between pools, the pointer to the scheduler lock may change as well.

Since we don't want to have to grab a lock before grabbing the per-cpu scheduler lock, we use the lock itself to protect against the pointer changing. However, since the pointer may change between reading it and locking, after we grab the lock we need to check that it is still the right one.

Update the vcpu_schedule_lock() definition to reflect this: both v->processor and that processor's schedule lock are liable to change; check both after grabbing the lock, and release / re-acquire if necessary.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
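The locking pattern the patch introduces can be illustrated outside Xen. Below is a minimal, self-contained sketch of "read the lock pointer, take the lock, then re-check the pointer"; the names (struct runq, struct unit, lock_unit) are hypothetical and pthread mutexes stand in for Xen's spinlocks:

#include <pthread.h>
#include <stdio.h>

struct runq {
    pthread_mutex_t lock;
};

struct unit {
    struct runq *rq;    /* which runqueue (and hence which lock) the unit
                         * uses; only changed while rq->lock is held */
};

/* Lock whatever runqueue the unit currently belongs to.  The unit may be
 * moved between reading u->rq and acquiring the lock, so re-check the
 * pointer after acquiring and retry on a mismatch. */
static struct runq *lock_unit(struct unit *u)
{
    for ( ; ; )
    {
        struct runq *rq = u->rq;

        pthread_mutex_lock(&rq->lock);
        if ( rq == u->rq )                /* still the right lock? */
            return rq;
        pthread_mutex_unlock(&rq->lock);  /* raced with a move; retry */
    }
}

int main(void)
{
    struct runq rq = { PTHREAD_MUTEX_INITIALIZER };
    struct unit u = { &rq };
    struct runq *locked = lock_unit(&u);

    printf("holding lock %p for unit %p\n", (void *)locked, (void *)&u);
    pthread_mutex_unlock(&locked->lock);
    return 0;
}

The post-acquire comparison is only sufficient because, as in the patch, the pointer is only ever changed by a party that holds the lock it currently points to.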
Diffstat (limited to 'xen/include/xen/sched-if.h')
-rw-r--r--   xen/include/xen/sched-if.h | 24 +++++++++++++-----------
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 8006820284..84f7f5a1c8 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -41,23 +41,25 @@ DECLARE_PER_CPU(struct cpupool *, cpupool);
 
 static inline void vcpu_schedule_lock(struct vcpu *v)
 {
-    unsigned int cpu;
+    spinlock_t * lock;
 
     for ( ; ; )
     {
-        /* NB: For schedulers with multiple cores per runqueue,
-         * a vcpu may change processor w/o changing runqueues;
-         * so we may release a lock only to grab it again.
+        /* v->processor may change when grabbing the lock; but
+         * per_cpu(v->processor) may also change, if changing
+         * cpu pool also changes the scheduler lock. Retry
+         * until they match.
          *
-         * If that is measured to be an issue, then the check
-         * should be changed to checking if the locks pointed to
-         * by cpu and v->processor are still the same.
+         * It may also be the case that v->processor may change
+         * but the lock may be the same; this will succeed
+         * in that case.
          */
-        cpu = v->processor;
-        spin_lock(per_cpu(schedule_data, cpu).schedule_lock);
-        if ( likely(v->processor == cpu) )
+        lock=per_cpu(schedule_data, v->processor).schedule_lock;
+
+        spin_lock(lock);
+        if ( likely(lock == per_cpu(schedule_data, v->processor).schedule_lock) )
             break;
-        spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
+        spin_unlock(lock);
     }
 }
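For context, the matching unlock path needs no retry loop: while the lock is held, neither v->processor nor the per-cpu lock pointer can change underneath us, so re-reading them yields the lock that was just taken. The exact helper is outside this hunk, so treat the following as an assumed sketch rather than the header's actual definition:

static inline void vcpu_schedule_unlock(struct vcpu *v)
{
    /* Assumed counterpart, not part of this hunk: holding the lock keeps
     * v->processor and the per-cpu lock pointer stable, so this releases
     * the same lock that vcpu_schedule_lock() acquired. */
    spin_unlock(per_cpu(schedule_data, v->processor).schedule_lock);
}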