author    Jan Beulich <jbeulich@novell.com>    2011-04-05 13:00:54 +0100
committer Jan Beulich <jbeulich@novell.com>    2011-04-05 13:00:54 +0100
commit    4551775df58d42e2dcfd2a8ac4bcc713709e8b81 (patch)
tree      5e10216631c44c2ec3102f85ca58646d766f46e5 /xen/common/domain.c
parent    2bbb3d0fe9149b302b42c98cffe3fbf1aa286548 (diff)
Remove direct cpumask_t members from struct vcpu and struct domain
The CPU masks embedded in these structures prevent NR_CPUS-independent sizing of these structures.

Basic concept (in xen/include/cpumask.h) taken from recent Linux.

For scalability purposes, many other uses of cpumask_t should be replaced by cpumask_var_t, particularly local variables of functions. This implies that no functions should have by-value cpumask_t parameters, and that the whole old cpumask interface (cpus_...()) should go away in favor of the new (cpumask_...()) one.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
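As an illustration of the pattern this change applies (not part of the patch itself; the struct and function names below are hypothetical), a dynamically sized mask is allocated with zalloc_cpumask_var() and released with free_cpumask_var():

#include <xen/cpumask.h>
#include <xen/errno.h>

struct example {                 /* hypothetical container */
    cpumask_var_t dirty_cpumask; /* formerly an embedded cpumask_t (NR_CPUS bits inline) */
};

static int example_init(struct example *e)
{
    /* Allocate a zeroed mask sized for the configured number of CPUs. */
    if ( !zalloc_cpumask_var(&e->dirty_cpumask) )
        return -ENOMEM;
    return 0;
}

static void example_destroy(struct example *e)
{
    /* Pairs with zalloc_cpumask_var(); mirrors the free paths added in the diff below. */
    free_cpumask_var(e->dirty_cpumask);
}

In the Linux scheme this is modelled on, small NR_CPUS configurations keep the mask embedded in cpumask_var_t and allocation becomes a trivial success, so the indirection costs nothing there.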
Diffstat (limited to 'xen/common/domain.c')
-rw-r--r--  xen/common/domain.c | 30
1 file changed, 23 insertions(+), 7 deletions(-)
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 51aa0400df..1290f655fe 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -151,6 +151,11 @@ struct vcpu *alloc_vcpu(
tasklet_init(&v->continue_hypercall_tasklet, NULL, 0);
+ if ( !zalloc_cpumask_var(&v->cpu_affinity) ||
+ !zalloc_cpumask_var(&v->cpu_affinity_tmp) ||
+ !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
+ goto fail_free;
+
if ( is_idle_domain(d) )
{
v->runstate.state = RUNSTATE_running;
@@ -167,16 +172,17 @@ struct vcpu *alloc_vcpu(
}
if ( sched_init_vcpu(v, cpu_id) != 0 )
- {
- destroy_waitqueue_vcpu(v);
- free_vcpu_struct(v);
- return NULL;
- }
+ goto fail_wq;
if ( vcpu_initialise(v) != 0 )
{
sched_destroy_vcpu(v);
+ fail_wq:
destroy_waitqueue_vcpu(v);
+ fail_free:
+ free_cpumask_var(v->cpu_affinity);
+ free_cpumask_var(v->cpu_affinity_tmp);
+ free_cpumask_var(v->vcpu_dirty_cpumask);
free_vcpu_struct(v);
return NULL;
}
@@ -246,6 +252,9 @@ struct domain *domain_create(
spin_lock_init(&d->shutdown_lock);
d->shutdown_code = -1;
+ if ( !zalloc_cpumask_var(&d->domain_dirty_cpumask) )
+ goto fail;
+
if ( domcr_flags & DOMCRF_hvm )
d->is_hvm = 1;
@@ -346,6 +355,7 @@ struct domain *domain_create(
xsm_free_security_domain(d);
xfree(d->pirq_mask);
xfree(d->pirq_to_evtchn);
+ free_cpumask_var(d->domain_dirty_cpumask);
free_domain_struct(d);
return NULL;
}
@@ -361,7 +371,7 @@ void domain_update_node_affinity(struct domain *d)
spin_lock(&d->node_affinity_lock);
for_each_vcpu ( d, v )
- cpus_or(cpumask, cpumask, v->cpu_affinity);
+ cpumask_or(&cpumask, &cpumask, v->cpu_affinity);
for_each_online_node ( node )
if ( cpus_intersects(node_to_cpumask(node), cpumask) )
@@ -658,7 +668,12 @@ static void complete_domain_destroy(struct rcu_head *head)
for ( i = d->max_vcpus - 1; i >= 0; i-- )
if ( (v = d->vcpu[i]) != NULL )
+ {
+ free_cpumask_var(v->cpu_affinity);
+ free_cpumask_var(v->cpu_affinity_tmp);
+ free_cpumask_var(v->vcpu_dirty_cpumask);
free_vcpu_struct(v);
+ }
if ( d->target != NULL )
put_domain(d->target);
@@ -669,6 +684,7 @@ static void complete_domain_destroy(struct rcu_head *head)
xfree(d->pirq_to_evtchn);
xsm_free_security_domain(d);
+ free_cpumask_var(d->domain_dirty_cpumask);
free_domain_struct(d);
send_guest_global_virq(dom0, VIRQ_DOM_EXC);
@@ -789,7 +805,7 @@ void vcpu_reset(struct vcpu *v)
v->async_exception_mask = 0;
memset(v->async_exception_state, 0, sizeof(v->async_exception_state));
#endif
- cpus_clear(v->cpu_affinity_tmp);
+ cpumask_clear(v->cpu_affinity_tmp);
clear_bit(_VPF_blocked, &v->pause_flags);
domain_unlock(v->domain);