diff options
author | Jan Beulich <jbeulich@suse.com> | 2013-10-14 08:57:56 +0200 |
---|---|---|
committer | Jan Beulich <jbeulich@suse.com> | 2013-10-14 08:57:56 +0200 |
commit | eedd60391610629b4e8a2e8278b857ff884f750d (patch) | |
tree | 2074ed67f4e6eadf0c5ace2f94442411be7fdc67 /xen/common/sched_credit.c | |
parent | 48830988a28b7fb1eed225354e25572aa955749a (diff) | |
download | xen-eedd60391610629b4e8a2e8278b857ff884f750d.tar.gz xen-eedd60391610629b4e8a2e8278b857ff884f750d.tar.bz2 xen-eedd60391610629b4e8a2e8278b857ff884f750d.zip |
scheduler: adjust internal locking interface
Make the locking functions return the lock pointers, so they can be
passed to the unlocking functions (which in turn can check that the
lock is still actually providing the intended protection, i.e. the
parameters determining which lock is the right one didn't change).
Further use proper spin lock primitives rather than open coded
local_irq_...() constructs, so that interrupts can be re-enabled as
appropriate while spinning.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Keir Fraser <keir@xen.org>
Diffstat (limited to 'xen/common/sched_credit.c')
-rw-r--r-- | xen/common/sched_credit.c | 11 |
1 file changed, 7 insertions, 4 deletions
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c index 3d6ea7acf3..192c653937 100644 --- a/xen/common/sched_credit.c +++ b/xen/common/sched_credit.c @@ -1166,6 +1166,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu) struct csched_pcpu * const spc = CSCHED_PCPU(cpu); struct list_head *runq, *elem, *next, *last_under; struct csched_vcpu *svc_elem; + spinlock_t *lock; unsigned long flags; int sort_epoch; @@ -1175,7 +1176,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu) spc->runq_sort_last = sort_epoch; - pcpu_schedule_lock_irqsave(cpu, flags); + lock = pcpu_schedule_lock_irqsave(cpu, &flags); runq = &spc->runq; elem = runq->next; @@ -1200,7 +1201,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu) elem = next; } - pcpu_schedule_unlock_irqrestore(cpu, flags); + pcpu_schedule_unlock_irqrestore(lock, flags, cpu); } static void @@ -1563,7 +1564,9 @@ csched_load_balance(struct csched_private *prv, int cpu, * could cause a deadlock if the peer CPU is also load * balancing and trying to lock this CPU. */ - if ( !pcpu_schedule_trylock(peer_cpu) ) + spinlock_t *lock = pcpu_schedule_trylock(peer_cpu); + + if ( !lock ) { SCHED_STAT_CRANK(steal_trylock_failed); peer_cpu = cpumask_cycle(peer_cpu, &workers); @@ -1573,7 +1576,7 @@ csched_load_balance(struct csched_private *prv, int cpu, /* Any work over there to steal? */ speer = cpumask_test_cpu(peer_cpu, online) ? csched_runq_steal(peer_cpu, cpu, snext->pri, bstep) : NULL; - pcpu_schedule_unlock(peer_cpu); + pcpu_schedule_unlock(lock, peer_cpu); /* As soon as one vcpu is found, balancing ends */ if ( speer != NULL ) |