author    Jan Beulich <jbeulich@suse.com>    2013-10-14 08:57:56 +0200
committer Jan Beulich <jbeulich@suse.com>    2013-10-14 08:57:56 +0200
commit    eedd60391610629b4e8a2e8278b857ff884f750d (patch)
tree      2074ed67f4e6eadf0c5ace2f94442411be7fdc67 /xen/common/sched_credit2.c
parent    48830988a28b7fb1eed225354e25572aa955749a (diff)
scheduler: adjust internal locking interface
Make the locking functions return the lock pointers, so they can be passed to the unlocking functions (which in turn can check that the lock is still actually providing the intended protection, i.e. the parameters determining which lock is the right one didn't change).

Further use proper spin lock primitives rather than open coded local_irq_...() constructs, so that interrupts can be re-enabled as appropriate while spinning.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Keir Fraser <keir@xen.org>
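The capture-and-return pattern this patch introduces can be shown in isolation. Below is a minimal, self-contained C sketch, with hypothetical unit_lock()/unit_unlock() names and pthread mutexes standing in for Xen's per-CPU schedule locks; it illustrates the pattern, not the actual sched-if.h implementation. The lock routine re-checks after acquisition that the pointer it followed still designates the protecting lock, retries if not, and returns the lock it actually took so the unlock side can both release it and verify it.

#include <pthread.h>
#include <stdio.h>

struct unit {
    pthread_mutex_t *lock;  /* which lock protects this unit; may be re-pointed */
};

/* Take whichever lock currently protects @u.  The pointer can change
 * while we block, so re-check after acquiring and retry if it moved.
 * Return the lock actually taken, for the caller to pass to unit_unlock(). */
static pthread_mutex_t *unit_lock(struct unit *u)
{
    for ( ; ; )
    {
        pthread_mutex_t *lock = u->lock;

        pthread_mutex_lock(lock);
        if ( lock == u->lock )
            return lock;              /* still the protecting lock */
        pthread_mutex_unlock(lock);   /* re-pointed underneath us: retry */
    }
}

/* Release, checking that the lock handed back still provides the
 * intended protection (the check the commit message alludes to). */
static void unit_unlock(pthread_mutex_t *lock, struct unit *u)
{
    if ( lock != u->lock )
        fprintf(stderr, "unit_unlock: lock no longer protects unit\n");
    pthread_mutex_unlock(lock);
}

int main(void)
{
    pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    struct unit u = { .lock = &m };
    pthread_mutex_t *lock = unit_lock(&u);

    /* ... critical section over u ... */
    unit_unlock(lock, &u);
    return 0;
}

Returning the lock pointer is what makes the verification possible at all: the caller no longer re-derives "which lock" at unlock time from parameters that may have changed in between.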
Diffstat (limited to 'xen/common/sched_credit2.c')
-rw-r--r--  xen/common/sched_credit2.c  22
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index ae0abc2ab9..84e547bad8 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -882,15 +882,17 @@ csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
*/
if ( ! is_idle_vcpu(vc) )
{
+ spinlock_t *lock;
+
/* FIXME: Do we need the private lock here? */
list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);
/* Add vcpu to runqueue of initial processor */
- vcpu_schedule_lock_irq(vc);
+ lock = vcpu_schedule_lock_irq(vc);
runq_assign(ops, vc);
- vcpu_schedule_unlock_irq(vc);
+ vcpu_schedule_unlock_irq(lock, vc);
sdom->nr_vcpus++;
}
@@ -917,14 +919,16 @@ csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
if ( ! is_idle_vcpu(vc) )
{
+ spinlock_t *lock;
+
SCHED_STAT_CRANK(vcpu_destroy);
/* Remove from runqueue */
- vcpu_schedule_lock_irq(vc);
+ lock = vcpu_schedule_lock_irq(vc);
runq_deassign(ops, vc);
- vcpu_schedule_unlock_irq(vc);
+ vcpu_schedule_unlock_irq(lock, vc);
/* Remove from sdom list. Don't need a lock for this, as it's called
* syncronously when nothing else can happen. */
@@ -1011,8 +1015,7 @@ csched_context_saved(const struct scheduler *ops, struct vcpu *vc)
{
struct csched_vcpu * const svc = CSCHED_VCPU(vc);
s_time_t now = NOW();
-
- vcpu_schedule_lock_irq(vc);
+ spinlock_t *lock = vcpu_schedule_lock_irq(vc);
BUG_ON( !is_idle_vcpu(vc) && svc->rqd != RQD(ops, vc->processor));
@@ -1038,7 +1041,7 @@ csched_context_saved(const struct scheduler *ops, struct vcpu *vc)
else if ( !is_idle_vcpu(vc) )
update_load(ops, svc->rqd, svc, -1, now);
- vcpu_schedule_unlock_irq(vc);
+ vcpu_schedule_unlock_irq(lock, vc);
}
#define MAX_LOAD (1ULL<<60);
@@ -1456,14 +1459,14 @@ csched_dom_cntl(
* must never lock csched_priv.lock if we're holding a runqueue lock.
* Also, calling vcpu_schedule_lock() is enough, since IRQs have already
* been disabled. */
- vcpu_schedule_lock(svc->vcpu);
+ spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
BUG_ON(svc->rqd != RQD(ops, svc->vcpu->processor));
svc->weight = sdom->weight;
update_max_weight(svc->rqd, svc->weight, old_weight);
- vcpu_schedule_unlock(svc->vcpu);
+ vcpu_schedule_unlock(lock, svc->vcpu);
}
}
}
@@ -1993,6 +1996,7 @@ static void init_pcpu(const struct scheduler *ops, int cpu)
cpumask_set_cpu(cpu, &rqd->idle);
cpumask_set_cpu(cpu, &rqd->active);
+ /* _Not_ pcpu_schedule_unlock(): per_cpu().schedule_lock changed! */
spin_unlock(old_lock);
cpumask_set_cpu(cpu, &prv->initialized);
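The final hunk is the one place where the checking unlock must not be used: init_pcpu() re-points per_cpu().schedule_lock at the run queue's lock while still holding the old one, so only a raw spin_unlock() of the captured pointer is correct. In terms of the hypothetical scaffolding sketched above (again an illustration, not Xen code):

/* Re-point the protecting lock while holding the old one.  The checking
 * unit_unlock() would (correctly) complain here, since u->lock has
 * changed by design; release the captured pointer directly instead. */
static void unit_repoint_lock(struct unit *u, pthread_mutex_t *new_lock)
{
    pthread_mutex_t *old_lock = unit_lock(u);

    u->lock = new_lock;           /* protection moves to new_lock */
    /* _Not_ unit_unlock(): u->lock changed! */
    pthread_mutex_unlock(old_lock);
}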