author    Jan Beulich <jbeulich@suse.com>    2013-08-23 15:05:39 +0200
committer Jan Beulich <jbeulich@suse.com>    2013-08-23 15:05:39 +0200
commit    a6aed7607de96904a96b8800b454511cf8334979 (patch)
tree      ca8a64c19cd121416483658fc9e72caa579fa1b4 /xen/common
parent    105ee865be224999e301b4303c740c1143b67b1d (diff)
credit2: replace cpumask_first() uses
... with cpumask_any() or cpumask_cycle(). In one case this also allows
elimination of a cpumask_empty() call, and while doing this I also spotted
a redundant use of cpumask_weight(). (When running on big systems,
operations on CPU masks aren't cheap enough to use them carelessly.)

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
Reviewed-by: George Dunlap <george.dunlap@eu.citrix.com>
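For reference, below is a minimal stand-alone sketch of the idiom the patch
moves to. The toymask_* helpers and NR_CPUS value are hypothetical stand-ins
for Xen's cpumask API (the real functions operate on cpumask_t bitmaps); what
they model is the property the patch relies on: a return value >= nr_cpu_ids
means "no bit found", so a separate cpumask_empty() scan becomes redundant,
and starting the search from the current CPU spreads picks across the mask
instead of always choosing the lowest set bit.

/* Hypothetical toymask helpers modeling Xen's cpumask_first() and
 * cpumask_cycle(); the real API operates on cpumask_t bitmaps. */
#include <stdio.h>

#define NR_CPUS 8
static const unsigned int nr_cpu_ids = NR_CPUS;

typedef unsigned int toymask_t;       /* one bit per CPU */

static unsigned int toymask_first(toymask_t m)
{
    unsigned int i;

    for ( i = 0; i < nr_cpu_ids; i++ )
        if ( m & (1u << i) )
            return i;
    return nr_cpu_ids;                /* sentinel: mask is empty */
}

static unsigned int toymask_cycle(unsigned int cpu, toymask_t m)
{
    unsigned int i;

    /* Scan the bits after 'cpu', wrapping around, 'cpu' itself last. */
    for ( i = 1; i <= nr_cpu_ids; i++ )
    {
        unsigned int c = (cpu + i) % nr_cpu_ids;

        if ( m & (1u << c) )
            return c;
    }
    return nr_cpu_ids;                /* sentinel: mask is empty */
}

int main(void)
{
    toymask_t idle_not_tickled = 0x36;   /* CPUs 1, 2, 4 and 5 set */
    unsigned int cpu;

    /* Old idiom: an emptiness scan plus a first-bit scan, and every
     * caller converges on the same (lowest) CPU. */
    if ( idle_not_tickled != 0 )
        printf("first pick: %u\n", toymask_first(idle_not_tickled));

    /* New idiom: a single scan; the sentinel covers the empty case,
     * and different starting CPUs spread the picks around. */
    for ( cpu = 0; cpu < nr_cpu_ids; cpu++ )
    {
        unsigned int pick = toymask_cycle(cpu, idle_not_tickled);

        if ( pick < nr_cpu_ids )
            printf("cycle from %u -> %u\n", cpu, pick);
    }
    return 0;
}

This is the same shape as the runq_tickle() change in the diff below: one
cpumask_cycle() call both tests for emptiness and chooses a CPU to tickle.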
Diffstat (limited to 'xen/common')
-rw-r--r--  xen/common/sched_credit2.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 825ec986e5..ae0abc2ab9 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -515,9 +515,10 @@ runq_tickle(const struct scheduler *ops, unsigned int cpu, struct csched_vcpu *n
     cpumask_andnot(&mask, &rqd->idle, &rqd->tickled);
 
     /* If it's not empty, choose one */
-    if ( !cpumask_empty(&mask) )
+    i = cpumask_cycle(cpu, &mask);
+    if ( i < nr_cpu_ids )
     {
-        ipid = cpumask_first(&mask);
+        ipid = i;
         goto tickle;
     }
@@ -1091,7 +1092,7 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc)
         else
         {
             d2printk("d%dv%d +\n", svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id);
-            new_cpu = cpumask_first(&svc->migrate_rqd->active);
+            new_cpu = cpumask_cycle(vc->processor, &svc->migrate_rqd->active);
             goto out_up;
         }
     }
@@ -1138,8 +1139,8 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc)
         new_cpu = vc->processor;
     else
     {
-        BUG_ON(cpumask_empty(&prv->rqd[min_rqi].active));
-        new_cpu = cpumask_first(&prv->rqd[min_rqi].active);
+        new_cpu = cpumask_cycle(vc->processor, &prv->rqd[min_rqi].active);
+        BUG_ON(new_cpu >= nr_cpu_ids);
     }
 
 out_up:
@@ -1219,7 +1220,7 @@ void migrate(const struct scheduler *ops,
             on_runq=1;
         }
         __runq_deassign(svc);
-        svc->vcpu->processor = cpumask_first(&trqd->active);
+        svc->vcpu->processor = cpumask_any(&trqd->active);
         __runq_assign(svc, trqd);
         if ( on_runq )
         {
@@ -1299,8 +1300,9 @@ retry:
         load_max = st.orqd->b_avgload;
 
     cpus_max = cpumask_weight(&st.lrqd->active);
-    if ( cpumask_weight(&st.orqd->active) > cpus_max )
-        cpus_max = cpumask_weight(&st.orqd->active);
+    i = cpumask_weight(&st.orqd->active);
+    if ( i > cpus_max )
+        cpus_max = i;
 
     /* If we're under 100% capacaty, only shift if load difference
      * is > 1. otherwise, shift if under 12.5% */
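On the final hunk: each cpumask_weight() call walks the whole CPU bitmap, so
calling it twice on the same unchanged mask doubles the work; the patch caches
the result instead. A rough stand-alone cost model follows (toymask_weight()
is a hypothetical stand-in; Xen's real implementation counts set bits per
word, but the cost is still proportional to NR_CPUS):

#include <limits.h>
#include <stdio.h>

#define NR_CPUS       4096
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define NR_WORDS      ((NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* One full pass over the bitmap per call -- the cost the patch avoids
 * paying twice by caching the result in 'i'. */
static unsigned int toymask_weight(const unsigned long bits[NR_WORDS])
{
    unsigned int i, w = 0;

    for ( i = 0; i < NR_WORDS; i++ )
        w += (unsigned int)__builtin_popcountl(bits[i]);
    return w;
}

int main(void)
{
    static unsigned long active[NR_WORDS];
    unsigned int i, cpus_max;

    active[0] = 0xff;                     /* CPUs 0-7 active */

    /* Patched pattern: compute each weight once and reuse it. */
    cpus_max = toymask_weight(active);    /* stands in for lrqd's mask */
    i = toymask_weight(active);           /* stands in for orqd's mask */
    if ( i > cpus_max )
        cpus_max = i;
    printf("cpus_max = %u\n", cpus_max);
    return 0;
}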