about · summary · refs · log · tree · commit · diff · stats
path: root/xen/common/schedule.c
diff options
context:
space:
mode:
author: Jan Beulich <jbeulich@novell.com> 2011-04-05 13:00:54 +0100
committer: Jan Beulich <jbeulich@novell.com> 2011-04-05 13:00:54 +0100
commit: 4551775df58d42e2dcfd2a8ac4bcc713709e8b81 (patch)
tree: 5e10216631c44c2ec3102f85ca58646d766f46e5 /xen/common/schedule.c
parent: 2bbb3d0fe9149b302b42c98cffe3fbf1aa286548 (diff)
download: xen-4551775df58d42e2dcfd2a8ac4bcc713709e8b81.tar.gz
download: xen-4551775df58d42e2dcfd2a8ac4bcc713709e8b81.tar.bz2
download: xen-4551775df58d42e2dcfd2a8ac4bcc713709e8b81.zip
Remove direct cpumask_t members from struct vcpu and struct domain
The CPU masks embedded in these structures prevent NR_CPUS-independent sizing of these structures. Basic concept (in xen/include/cpumask.h) taken from recent Linux. For scalability purposes, many other uses of cpumask_t should be replaced by cpumask_var_t, particularly local variables of functions. This implies that no functions should have by-value cpumask_t parameters, and that the whole old cpumask interface (cpus_...()) should go away in favor of the new (cpumask_...()) one. Signed-off-by: Jan Beulich <jbeulich@novell.com>
Diffstat (limited to 'xen/common/schedule.c')
-rw-r--r--  xen/common/schedule.c | 22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 5efb822a81..dde615ee66 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -196,9 +196,9 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
*/
v->processor = processor;
if ( is_idle_domain(d) || d->is_pinned )
- v->cpu_affinity = cpumask_of_cpu(processor);
+ cpumask_copy(v->cpu_affinity, cpumask_of(processor));
else
- cpus_setall(v->cpu_affinity);
+ cpumask_setall(v->cpu_affinity);
/* Initialise the per-vcpu timers. */
init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
@@ -273,7 +273,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
SCHED_OP(VCPU2OP(v), remove_vcpu, v);
SCHED_OP(VCPU2OP(v), free_vdata, v->sched_priv);
- cpus_setall(v->cpu_affinity);
+ cpumask_setall(v->cpu_affinity);
v->processor = new_p;
v->sched_priv = vcpu_priv[v->vcpu_id];
evtchn_move_pirqs(v);
@@ -435,7 +435,7 @@ static void vcpu_migrate(struct vcpu *v)
*/
if ( pick_called &&
(new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
- cpu_isset(new_cpu, v->cpu_affinity) &&
+ cpumask_test_cpu(new_cpu, v->cpu_affinity) &&
cpu_isset(new_cpu, v->domain->cpupool->cpu_valid) )
break;
@@ -550,13 +550,13 @@ int cpu_disable_scheduler(unsigned int cpu)
{
vcpu_schedule_lock_irq(v);
- cpus_and(online_affinity, v->cpu_affinity, c->cpu_valid);
+ cpumask_and(&online_affinity, v->cpu_affinity, &c->cpu_valid);
if ( cpus_empty(online_affinity) &&
- cpu_isset(cpu, v->cpu_affinity) )
+ cpumask_test_cpu(cpu, v->cpu_affinity) )
{
printk("Breaking vcpu affinity for domain %d vcpu %d\n",
v->domain->domain_id, v->vcpu_id);
- cpus_setall(v->cpu_affinity);
+ cpumask_setall(v->cpu_affinity);
affinity_broken = 1;
}
@@ -602,10 +602,10 @@ int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
vcpu_schedule_lock_irq(v);
- old_affinity = v->cpu_affinity;
- v->cpu_affinity = *affinity;
- *affinity = old_affinity;
- if ( !cpu_isset(v->processor, v->cpu_affinity) )
+ cpumask_copy(&old_affinity, v->cpu_affinity);
+ cpumask_copy(v->cpu_affinity, affinity);
+ cpumask_copy(affinity, &old_affinity);
+ if ( !cpumask_test_cpu(v->processor, v->cpu_affinity) )
set_bit(_VPF_migrating, &v->pause_flags);
vcpu_schedule_unlock_irq(v);