author     Jan Beulich <jbeulich@suse.com>  2011-11-08 10:31:47 +0100
committer  Jan Beulich <jbeulich@suse.com>  2011-11-08 10:31:47 +0100
commit     8996e454310d4fa3f587b65ccfe57e366e68a204 (patch)
tree       cc54784d70d8cef67db4152d7812b116a1b1b760 /xen/common
parent     488355d213f20832ee5699e136fbf957a97d5f5f (diff)
eliminate cpus_xyz()
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
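
Every call site changes the same way: a cpus_xyz() invocation on cpumask_t operands becomes the matching cpumask_xyz() call on const cpumask_t pointers, and the one place that wanted the weight of cpu_online_map (rcupdate.c) now uses the existing num_online_cpus() accessor instead. As a rough illustration of the interface shape this converges on, here is a standalone C sketch; the toy_* type and helper names are illustrative stand-ins, not Xen's definitions, and the single-word mask is a simplification of a real NR_CPUS-sized bitmap.

#include <stdbool.h>
#include <stdio.h>

#define TOY_NR_CPUS 64  /* one machine word; a real mask spans NR_CPUS bits */

typedef struct { unsigned long bits; } toy_cpumask_t;

/* Pointer-taking helpers, mirroring the cpumask_xyz() call shape. */
static bool toy_cpumask_empty(const toy_cpumask_t *m)
{
    return m->bits == 0;
}

static unsigned int toy_cpumask_weight(const toy_cpumask_t *m)
{
    return (unsigned int)__builtin_popcountl(m->bits); /* GCC/Clang builtin */
}

static bool toy_cpumask_intersects(const toy_cpumask_t *a,
                                   const toy_cpumask_t *b)
{
    return (a->bits & b->bits) != 0;
}

int main(void)
{
    toy_cpumask_t online = { .bits = 0xf };  /* CPUs 0-3 online */
    toy_cpumask_t node0  = { .bits = 0x3 };  /* CPUs 0-1 in node 0 */

    /* The old style would read toy_cpus_intersects(online, node0);
     * the new style passes the operands' addresses explicitly. */
    if ( toy_cpumask_intersects(&online, &node0) )
        printf("%u of the node's CPUs are in the online mask\n",
               toy_cpumask_weight(&node0));

    return toy_cpumask_empty(&online) ? 1 : 0;
}

Taking const pointers keeps the helpers usable with both statically and dynamically allocated masks and makes explicit that operands are only read, never copied or modified.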
Diffstat (limited to 'xen/common')
-rw-r--r--  xen/common/domain.c         2
-rw-r--r--  xen/common/perfc.c          4
-rw-r--r--  xen/common/rcupdate.c       2
-rw-r--r--  xen/common/sched_credit.c   2
-rw-r--r--  xen/common/sched_credit2.c  20
-rw-r--r--  xen/common/schedule.c       2
6 files changed, 16 insertions, 16 deletions
diff --git a/xen/common/domain.c b/xen/common/domain.c
index d8ddb23d41..9e355c898d 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -371,7 +371,7 @@ void domain_update_node_affinity(struct domain *d)
         cpumask_or(&cpumask, &cpumask, v->cpu_affinity);
 
     for_each_online_node ( node )
-        if ( cpus_intersects(node_to_cpumask(node), cpumask) )
+        if ( cpumask_intersects(&node_to_cpumask(node), &cpumask) )
             node_set(node, nodemask);
 
     d->node_affinity = nodemask;
diff --git a/xen/common/perfc.c b/xen/common/perfc.c
index 3d30556ea6..81b41ba3d9 100644
--- a/xen/common/perfc.c
+++ b/xen/common/perfc.c
@@ -163,11 +163,11 @@ static int perfc_copy_info(XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc,
     unsigned int i, j, v;
 
     /* We only copy the name and array-size information once. */
-    if ( !cpus_equal(cpu_online_map, perfc_cpumap) )
+    if ( !cpumask_equal(&cpu_online_map, &perfc_cpumap) )
     {
         unsigned int nr_cpus;
 
         perfc_cpumap = cpu_online_map;
-        nr_cpus = cpus_weight(perfc_cpumap);
+        nr_cpus = cpumask_weight(&perfc_cpumap);
 
         perfc_nbr_vals = 0;
diff --git a/xen/common/rcupdate.c b/xen/common/rcupdate.c
index b14612aff8..6e6483e100 100644
--- a/xen/common/rcupdate.c
+++ b/xen/common/rcupdate.c
@@ -121,7 +121,7 @@ static int rcu_barrier_action(void *_cpu_count)
      */
     call_rcu(&data.head, rcu_barrier_callback);
 
-    while ( atomic_read(data.cpu_count) != cpus_weight(cpu_online_map) )
+    while ( atomic_read(data.cpu_count) != num_online_cpus() )
     {
         process_pending_softirqs();
         cpu_relax();
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 4a50ba78a2..4fa6140da5 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1246,7 +1246,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
     cpumask_clear_cpu(cpu, &workers);
     peer_cpu = cpu;
 
-    while ( !cpus_empty(workers) )
+    while ( !cpumask_empty(&workers) )
     {
         peer_cpu = cpumask_cycle(peer_cpu, &workers);
         cpumask_clear_cpu(peer_cpu, &workers);
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 6e09c84ed6..0592c96c9e 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -1009,7 +1009,7 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc)
     struct csched_vcpu *svc = CSCHED_VCPU(vc);
     s_time_t min_avgload;
 
-    BUG_ON(cpus_empty(prv->active_queues));
+    BUG_ON(cpumask_empty(&prv->active_queues));
 
     /* Locking:
      * - vc->processor is already locked
@@ -1098,7 +1098,7 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc)
         new_cpu = vc->processor;
     else
     {
-        BUG_ON(cpus_empty(prv->rqd[min_rqi].active));
+        BUG_ON(cpumask_empty(&prv->rqd[min_rqi].active));
         new_cpu = first_cpu(prv->rqd[min_rqi].active);
     }
 
@@ -1258,9 +1258,9 @@ retry:
     if ( st.orqd->b_avgload > load_max )
         load_max = st.orqd->b_avgload;
 
-    cpus_max=cpus_weight(st.lrqd->active);
-    if ( cpus_weight(st.orqd->active) > cpus_max )
-        cpus_max = cpus_weight(st.orqd->active);
+    cpus_max = cpumask_weight(&st.lrqd->active);
+    if ( cpumask_weight(&st.orqd->active) > cpus_max )
+        cpus_max = cpumask_weight(&st.orqd->active);
 
     /* If we're under 100% capacaty, only shift if load difference
      * is > 1. otherwise, shift if under 12.5% */
@@ -1801,7 +1801,7 @@ csched_dump(const struct scheduler *ops)
 
     printk("Active queues: %d\n"
            "\tdefault-weight = %d\n",
-           cpus_weight(prv->active_queues),
+           cpumask_weight(&prv->active_queues),
            CSCHED_DEFAULT_WEIGHT);
     for_each_cpu_mask(i, prv->active_queues)
     {
@@ -1815,7 +1815,7 @@ csched_dump(const struct scheduler *ops)
                "\tinstload = %d\n"
                "\taveload = %3"PRI_stime"\n",
                i,
-               cpus_weight(prv->rqd[i].active),
+               cpumask_weight(&prv->rqd[i].active),
                prv->rqd[i].max_weight,
                prv->rqd[i].load,
                fraction);
@@ -1852,7 +1852,7 @@ static void activate_runqueue(struct csched_private *prv, int rqi)
 
     rqd = prv->rqd + rqi;
 
-    BUG_ON(!cpus_empty(rqd->active));
+    BUG_ON(!cpumask_empty(&rqd->active));
 
     rqd->max_weight = 1;
     rqd->id = rqi;
@@ -1869,7 +1869,7 @@ static void deactivate_runqueue(struct csched_private *prv, int rqi)
 
     rqd = prv->rqd + rqi;
 
-    BUG_ON(!cpus_empty(rqd->active));
+    BUG_ON(!cpumask_empty(&rqd->active));
 
     rqd->id = -1;
 
@@ -1980,7 +1980,7 @@ csched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
     cpu_clear(cpu, rqd->idle);
     cpu_clear(cpu, rqd->active);
 
-    if ( cpus_empty(rqd->active) )
+    if ( cpumask_empty(&rqd->active) )
     {
         printk(" No cpus left on runqueue, disabling\n");
         deactivate_runqueue(prv, rqi);
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index bcdae7190a..b22cf67fe5 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -550,7 +550,7 @@ int cpu_disable_scheduler(unsigned int cpu)
             vcpu_schedule_lock_irq(v);
 
             cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid);
-            if ( cpus_empty(online_affinity) &&
+            if ( cpumask_empty(&online_affinity) &&
                  cpumask_test_cpu(cpu, v->cpu_affinity) )
             {
                 printk("Breaking vcpu affinity for domain %d vcpu %d\n",