aboutsummaryrefslogtreecommitdiffstats
path: root/xen/common/sched_credit.c
diff options
context:
space:
mode:
authorKeir Fraser <keir@xen.org>2010-12-24 08:26:59 +0000
committerKeir Fraser <keir@xen.org>2010-12-24 08:26:59 +0000
commitd2f6b6016990b570c6782e1639ca1c0b07013b59 (patch)
tree18098d7d1f3581e1500ce0bdd819083c8b5d702f /xen/common/sched_credit.c
parent188be59890f39c0af54367e28cd0a80b80343b55 (diff)
downloadxen-d2f6b6016990b570c6782e1639ca1c0b07013b59.tar.gz
xen-d2f6b6016990b570c6782e1639ca1c0b07013b59.tar.bz2
xen-d2f6b6016990b570c6782e1639ca1c0b07013b59.zip
scheduler: Introduce pcpu_schedule_lock
Many places in Xen, particularly schedule.c, grab the per-cpu spinlock directly, rather than through vcpu_schedule_lock(). Since the lock pointer may change between the time it's read and the time the lock is successfully acquired, we need to check after acquiring the lock to make sure that the pcpu's lock hasn't changed, due to cpu initialization or cpupool activity. Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Diffstat (limited to 'xen/common/sched_credit.c')
-rw-r--r--xen/common/sched_credit.c8
1 file changed, 4 insertions, 4 deletions
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index aad47706a9..bfe20d30d0 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -905,7 +905,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu)
spc->runq_sort_last = sort_epoch;
- spin_lock_irqsave(per_cpu(schedule_data, cpu).schedule_lock, flags);
+ pcpu_schedule_lock_irqsave(cpu, flags);
runq = &spc->runq;
elem = runq->next;
@@ -930,7 +930,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu)
elem = next;
}
- spin_unlock_irqrestore(per_cpu(schedule_data, cpu).schedule_lock, flags);
+ pcpu_schedule_unlock_irqrestore(cpu, flags);
}
static void
@@ -1259,7 +1259,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
* cause a deadlock if the peer CPU is also load balancing and trying
* to lock this CPU.
*/
- if ( !spin_trylock(per_cpu(schedule_data, peer_cpu).schedule_lock) )
+ if ( !pcpu_schedule_trylock(peer_cpu) )
{
CSCHED_STAT_CRANK(steal_trylock_failed);
continue;
@@ -1269,7 +1269,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
* Any work over there to steal?
*/
speer = csched_runq_steal(peer_cpu, cpu, snext->pri);
- spin_unlock(per_cpu(schedule_data, peer_cpu).schedule_lock);
+ pcpu_schedule_unlock(peer_cpu);
if ( speer != NULL )
{
*stolen = 1;