author    Jan Beulich <jbeulich@suse.com>    2011-11-08 10:31:47 +0100
committer Jan Beulich <jbeulich@suse.com>    2011-11-08 10:31:47 +0100
commit    8996e454310d4fa3f587b65ccfe57e366e68a204 (patch)
tree      cc54784d70d8cef67db4152d7812b116a1b1b760 /xen/common/sched_credit2.c
parent    488355d213f20832ee5699e136fbf957a97d5f5f (diff)
eliminate cpus_xyz()
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
Diffstat (limited to 'xen/common/sched_credit2.c')
-rw-r--r--    xen/common/sched_credit2.c    20
1 file changed, 10 insertions(+), 10 deletions(-)
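
The conversion itself is mechanical: each legacy cpus_xyz() helper, which operated on a cpumask_t passed by value, is replaced by the corresponding cpumask_xyz() helper, which takes a pointer, so every call site now passes &mask (e.g. cpus_empty(prv->active_queues) becomes cpumask_empty(&prv->active_queues)). The following is a minimal stand-alone sketch, not Xen code: toy_cpumask_t and the toy_* helpers are hypothetical stand-ins, shown only to illustrate the pointer-based calling convention the patch moves to.

/*
 * Minimal stand-alone sketch (not Xen code) of the calling-convention
 * change applied by this patch: the old cpus_*() helpers took a
 * cpumask_t by value, the cpumask_*() helpers take a pointer, so the
 * mask is not copied on every call.  toy_cpumask_t and the toy_*
 * names below are hypothetical stand-ins for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_NR_CPUS       256
#define TOY_BITS_PER_LONG (8 * sizeof(unsigned long))
#define TOY_NR_WORDS      ((TOY_NR_CPUS + TOY_BITS_PER_LONG - 1) / TOY_BITS_PER_LONG)

typedef struct { unsigned long bits[TOY_NR_WORDS]; } toy_cpumask_t;

/* Pointer-based analogue of cpumask_empty(). */
static bool toy_cpumask_empty(const toy_cpumask_t *mask)
{
    for (unsigned int i = 0; i < TOY_NR_WORDS; i++)
        if (mask->bits[i])
            return false;
    return true;
}

/* Pointer-based analogue of cpumask_weight(): number of set bits. */
static unsigned int toy_cpumask_weight(const toy_cpumask_t *mask)
{
    unsigned int w = 0;

    for (unsigned int i = 0; i < TOY_NR_WORDS; i++)
        w += (unsigned int)__builtin_popcountl(mask->bits[i]);
    return w;
}

static void toy_cpumask_set_cpu(unsigned int cpu, toy_cpumask_t *mask)
{
    mask->bits[cpu / TOY_BITS_PER_LONG] |= 1UL << (cpu % TOY_BITS_PER_LONG);
}

int main(void)
{
    toy_cpumask_t active = { { 0 } };

    toy_cpumask_set_cpu(3, &active);
    toy_cpumask_set_cpu(7, &active);

    /* Old style would have been cpus_weight(active); new style passes &active. */
    printf("empty=%d weight=%u\n",
           (int)toy_cpumask_empty(&active), toy_cpumask_weight(&active));
    return 0;
}

Compiled with GCC or Clang (for __builtin_popcountl), this prints "empty=0 weight=2"; the diff below applies exactly this by-value-to-pointer substitution to the credit2 scheduler's masks.
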
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 6e09c84ed6..0592c96c9e 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -1009,7 +1009,7 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc)
struct csched_vcpu *svc = CSCHED_VCPU(vc);
s_time_t min_avgload;
- BUG_ON(cpus_empty(prv->active_queues));
+ BUG_ON(cpumask_empty(&prv->active_queues));
/* Locking:
* - vc->processor is already locked
@@ -1098,7 +1098,7 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc)
new_cpu = vc->processor;
else
{
- BUG_ON(cpus_empty(prv->rqd[min_rqi].active));
+ BUG_ON(cpumask_empty(&prv->rqd[min_rqi].active));
new_cpu = first_cpu(prv->rqd[min_rqi].active);
}
@@ -1258,9 +1258,9 @@ retry:
if ( st.orqd->b_avgload > load_max )
load_max = st.orqd->b_avgload;
- cpus_max=cpus_weight(st.lrqd->active);
- if ( cpus_weight(st.orqd->active) > cpus_max )
- cpus_max = cpus_weight(st.orqd->active);
+ cpus_max = cpumask_weight(&st.lrqd->active);
+ if ( cpumask_weight(&st.orqd->active) > cpus_max )
+ cpus_max = cpumask_weight(&st.orqd->active);
/* If we're under 100% capacaty, only shift if load difference
* is > 1. otherwise, shift if under 12.5% */
@@ -1801,7 +1801,7 @@ csched_dump(const struct scheduler *ops)
printk("Active queues: %d\n"
"\tdefault-weight = %d\n",
- cpus_weight(prv->active_queues),
+ cpumask_weight(&prv->active_queues),
CSCHED_DEFAULT_WEIGHT);
for_each_cpu_mask(i, prv->active_queues)
{
@@ -1815,7 +1815,7 @@ csched_dump(const struct scheduler *ops)
"\tinstload = %d\n"
"\taveload = %3"PRI_stime"\n",
i,
- cpus_weight(prv->rqd[i].active),
+ cpumask_weight(&prv->rqd[i].active),
prv->rqd[i].max_weight,
prv->rqd[i].load,
fraction);
@@ -1852,7 +1852,7 @@ static void activate_runqueue(struct csched_private *prv, int rqi)
rqd = prv->rqd + rqi;
- BUG_ON(!cpus_empty(rqd->active));
+ BUG_ON(!cpumask_empty(&rqd->active));
rqd->max_weight = 1;
rqd->id = rqi;
@@ -1869,7 +1869,7 @@ static void deactivate_runqueue(struct csched_private *prv, int rqi)
rqd = prv->rqd + rqi;
- BUG_ON(!cpus_empty(rqd->active));
+ BUG_ON(!cpumask_empty(&rqd->active));
rqd->id = -1;
@@ -1980,7 +1980,7 @@ csched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
cpu_clear(cpu, rqd->idle);
cpu_clear(cpu, rqd->active);
- if ( cpus_empty(rqd->active) )
+ if ( cpumask_empty(&rqd->active) )
{
printk(" No cpus left on runqueue, disabling\n");
deactivate_runqueue(prv, rqi);