author     Jan Beulich <jbeulich@suse.com>    2011-10-21 09:43:35 +0200
committer  Jan Beulich <jbeulich@suse.com>    2011-10-21 09:43:35 +0200
commit     d4b41687ff590b3bf8ef0b2d5d9b0b1a15c0c10c (patch)
tree       9921bad0084b6073901065f61168fed878e42e36
parent     985d4520485eaa412394a81b0bbb576f996d34b3 (diff)
cpupools: allocate CPU masks dynamically
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
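
For reference, the change follows the usual cpumask_var_t idiom: instead of
embedding a full cpumask_t in the structure, the mask is allocated and freed
separately, and call sites pass the pointer to the cpumask_* accessors
directly rather than taking the field's address. A minimal sketch of that
pattern (struct pool and the function names below are hypothetical, used
only for illustration; they are not part of this patch):

    /* Assumes xen/cpumask.h and xen/xmalloc.h. */
    struct pool {
        cpumask_var_t cpus;              /* was: cpumask_t cpus; */
    };

    static struct pool *pool_alloc(void)
    {
        struct pool *p = xzalloc(struct pool);

        /* Allocate and zero the mask; back out cleanly on failure. */
        if ( p && zalloc_cpumask_var(&p->cpus) )
            return p;
        xfree(p);                        /* xfree(NULL) is a no-op */
        return NULL;
    }

    static void pool_free(struct pool *p)
    {
        if ( p )
            free_cpumask_var(p->cpus);
        xfree(p);
    }

    /* Call sites then operate on the mask pointer directly, e.g.:
     *     cpumask_set_cpu(cpu, p->cpus);   instead of   cpu_set(cpu, p->cpus);
     */
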
-rw-r--r--  xen/arch/x86/domain_build.c    4
-rw-r--r--  xen/arch/x86/smpboot.c         4
-rw-r--r--  xen/common/cpupool.c          33
-rw-r--r--  xen/common/domctl.c            2
-rw-r--r--  xen/common/sched_credit.c      2
-rw-r--r--  xen/common/sched_credit2.c     2
-rw-r--r--  xen/common/sched_sedf.c        2
-rw-r--r--  xen/common/schedule.c         14
-rw-r--r--  xen/include/xen/sched-if.h     2
-rw-r--r--  xen/include/xen/sched.h        2
10 files changed, 37 insertions(+), 30 deletions(-)
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index 364968d70c..1b3818fc7f 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -885,10 +885,10 @@ int __init construct_dom0(
printk("Dom0 has maximum %u VCPUs\n", opt_dom0_max_vcpus);
- cpu = first_cpu(cpupool0->cpu_valid);
+ cpu = cpumask_first(cpupool0->cpu_valid);
for ( i = 1; i < opt_dom0_max_vcpus; i++ )
{
- cpu = cycle_cpu(cpu, cpupool0->cpu_valid);
+ cpu = cpumask_cycle(cpu, cpupool0->cpu_valid);
(void)alloc_vcpu(d, i, cpu);
}
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index e9c613cdcc..24954684df 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -850,8 +850,8 @@ void __cpu_disable(void)
remove_siblinginfo(cpu);
/* It's now safe to remove this processor from the online map */
- cpu_clear(cpu, cpupool0->cpu_valid);
- cpu_clear(cpu, cpu_online_map);
+ cpumask_clear_cpu(cpu, cpupool0->cpu_valid);
+ cpumask_clear_cpu(cpu, &cpu_online_map);
fixup_irqs();
if ( cpu_disable_scheduler(cpu) )
diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index 459edf73fb..e3ad20bf37 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -39,11 +39,18 @@ DEFINE_PER_CPU(struct cpupool *, cpupool);
static struct cpupool *alloc_cpupool_struct(void)
{
- return xzalloc(struct cpupool);
+ struct cpupool *c = xzalloc(struct cpupool);
+
+ if ( c && zalloc_cpumask_var(&c->cpu_valid) )
+ return c;
+ xfree(c);
+ return NULL;
}
static void free_cpupool_struct(struct cpupool *c)
{
+ if ( c )
+ free_cpumask_var(c->cpu_valid);
xfree(c);
}
@@ -191,7 +198,7 @@ static int cpupool_destroy(struct cpupool *c)
spin_unlock(&cpupool_lock);
return -ENOENT;
}
- if ( (c->n_dom != 0) || cpus_weight(c->cpu_valid) )
+ if ( (c->n_dom != 0) || cpumask_weight(c->cpu_valid) )
{
spin_unlock(&cpupool_lock);
return -EBUSY;
@@ -232,7 +239,7 @@ static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
cpupool_put(cpupool_cpu_moving);
cpupool_cpu_moving = NULL;
}
- cpu_set(cpu, c->cpu_valid);
+ cpumask_set_cpu(cpu, c->cpu_valid);
return 0;
}
@@ -296,10 +303,10 @@ int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
goto out;
ret = 0;
- if ( !cpu_isset(cpu, c->cpu_valid) && (cpu != cpupool_moving_cpu) )
+ if ( !cpumask_test_cpu(cpu, c->cpu_valid) && (cpu != cpupool_moving_cpu) )
goto out;
- if ( (c->n_dom > 0) && (cpus_weight(c->cpu_valid) == 1) &&
+ if ( (c->n_dom > 0) && (cpumask_weight(c->cpu_valid) == 1) &&
(cpu != cpupool_moving_cpu) )
{
for_each_domain(d)
@@ -326,15 +333,15 @@ int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
cpupool_moving_cpu = cpu;
atomic_inc(&c->refcnt);
cpupool_cpu_moving = c;
- cpu_clear(cpu, c->cpu_valid);
+ cpumask_clear_cpu(cpu, c->cpu_valid);
spin_unlock(&cpupool_lock);
work_cpu = smp_processor_id();
if ( work_cpu == cpu )
{
- work_cpu = first_cpu(cpupool0->cpu_valid);
+ work_cpu = cpumask_first(cpupool0->cpu_valid);
if ( work_cpu == cpu )
- work_cpu = next_cpu(cpu, cpupool0->cpu_valid);
+ work_cpu = cpumask_next(cpu, cpupool0->cpu_valid);
}
return continue_hypercall_on_cpu(work_cpu, cpupool_unassign_cpu_helper, c);
@@ -361,7 +368,7 @@ int cpupool_add_domain(struct domain *d, int poolid)
return 0;
spin_lock(&cpupool_lock);
c = cpupool_find_by_id(poolid);
- if ( (c != NULL) && cpus_weight(c->cpu_valid) )
+ if ( (c != NULL) && cpumask_weight(c->cpu_valid) )
{
c->n_dom++;
n_dom = c->n_dom;
@@ -418,7 +425,7 @@ static int cpupool_cpu_remove(unsigned int cpu)
int ret = 0;
spin_lock(&cpupool_lock);
- if ( !cpu_isset(cpu, cpupool0->cpu_valid))
+ if ( !cpumask_test_cpu(cpu, cpupool0->cpu_valid))
ret = -EBUSY;
else
cpu_set(cpu, cpupool_locked_cpus);
@@ -473,7 +480,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
op->cpupool_id = c->cpupool_id;
op->sched_id = c->sched->sched_id;
op->n_dom = c->n_dom;
- ret = cpumask_to_xenctl_cpumap(&op->cpumap, &c->cpu_valid);
+ ret = cpumask_to_xenctl_cpumap(&op->cpumap, c->cpu_valid);
cpupool_put(c);
}
break;
@@ -516,7 +523,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
break;
cpu = op->cpu;
if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
- cpu = last_cpu(c->cpu_valid);
+ cpu = cpumask_last(c->cpu_valid);
ret = (cpu < nr_cpu_ids) ? cpupool_unassign_cpu(c, cpu) : -EINVAL;
cpupool_put(c);
}
@@ -550,7 +557,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
ret = -ENOENT;
spin_lock(&cpupool_lock);
c = cpupool_find_by_id(op->cpupool_id);
- if ( (c != NULL) && cpus_weight(c->cpu_valid) )
+ if ( (c != NULL) && cpumask_weight(c->cpu_valid) )
{
d->cpupool->n_dom--;
ret = sched_move_domain(d, c);
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 685901a4d7..c524659d4f 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -502,7 +502,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
goto maxvcpu_out;
ret = -ENOMEM;
- online = (d->cpupool == NULL) ? &cpu_online_map : &d->cpupool->cpu_valid;
+ online = (d->cpupool == NULL) ? &cpu_online_map : d->cpupool->cpu_valid;
if ( max > d->max_vcpus )
{
struct vcpu **vcpus;
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index cfd90a7cc8..8d8f6cd36d 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -73,7 +73,7 @@
#define CSCHED_DOM(_dom) ((struct csched_dom *) (_dom)->sched_priv)
#define RUNQ(_cpu) (&(CSCHED_PCPU(_cpu)->runq))
#define CSCHED_CPUONLINE(_pool) \
- (((_pool) == NULL) ? &cpupool_free_cpus : &(_pool)->cpu_valid)
+ (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
/*
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index e9c450cbf6..6e09c84ed6 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -176,7 +176,7 @@ integer_param("sched_credit2_migrate_resist", opt_migrate_resist);
#define CSCHED_VCPU(_vcpu) ((struct csched_vcpu *) (_vcpu)->sched_priv)
#define CSCHED_DOM(_dom) ((struct csched_dom *) (_dom)->sched_priv)
#define CSCHED_CPUONLINE(_pool) \
- (((_pool) == NULL) ? &cpupool_free_cpus : &(_pool)->cpu_valid)
+ (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
/* CPU to runq_id macro */
#define c2r(_ops, _cpu) (CSCHED_PRIV(_ops)->runq_map[(_cpu)])
/* CPU to runqueue struct macro */
diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index ae93f39420..6fe7ffa8b2 100644
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -22,7 +22,7 @@
} while ( 0 )
#define SEDF_CPUONLINE(_pool) \
- (((_pool) == NULL) ? &cpupool_free_cpus : &(_pool)->cpu_valid)
+ (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
#ifndef NDEBUG
#define SEDF_STATS
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index c4ccc79997..bcdae7190a 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -74,7 +74,7 @@ static struct scheduler __read_mostly ops;
#define VCPU2OP(_v) (DOM2OP((_v)->domain))
#define VCPU2ONLINE(_v) \
(((_v)->domain->cpupool == NULL) ? &cpu_online_map \
- : &(_v)->domain->cpupool->cpu_valid)
+ : (_v)->domain->cpupool->cpu_valid)
static inline void trace_runstate_change(struct vcpu *v, int new_state)
{
@@ -258,7 +258,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
domain_pause(d);
- new_p = first_cpu(c->cpu_valid);
+ new_p = cpumask_first(c->cpu_valid);
for_each_vcpu ( d, v )
{
migrate_timer(&v->periodic_timer, new_p);
@@ -273,7 +273,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
v->sched_priv = vcpu_priv[v->vcpu_id];
evtchn_move_pirqs(v);
- new_p = cycle_cpu(new_p, c->cpu_valid);
+ new_p = cpumask_cycle(new_p, c->cpu_valid);
SCHED_OP(VCPU2OP(v), insert_vcpu, v);
}
@@ -431,13 +431,13 @@ static void vcpu_migrate(struct vcpu *v)
if ( pick_called &&
(new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
cpumask_test_cpu(new_cpu, v->cpu_affinity) &&
- cpu_isset(new_cpu, v->domain->cpupool->cpu_valid) )
+ cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
break;
/* Select a new CPU. */
new_cpu = SCHED_OP(VCPU2OP(v), pick_cpu, v);
if ( (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
- cpu_isset(new_cpu, v->domain->cpupool->cpu_valid) )
+ cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
break;
pick_called = 1;
}
@@ -549,7 +549,7 @@ int cpu_disable_scheduler(unsigned int cpu)
{
vcpu_schedule_lock_irq(v);
- cpumask_and(&online_affinity, v->cpu_affinity, &c->cpu_valid);
+ cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid);
if ( cpus_empty(online_affinity) &&
cpumask_test_cpu(cpu, v->cpu_affinity) )
{
@@ -1446,7 +1446,7 @@ void schedule_dump(struct cpupool *c)
cpumask_t *cpus;
sched = (c == NULL) ? &ops : c->sched;
- cpus = (c == NULL) ? &cpupool_free_cpus : &c->cpu_valid;
+ cpus = (c == NULL) ? &cpupool_free_cpus : c->cpu_valid;
printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
SCHED_OP(sched, dump_settings);
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 72a1293531..d9519f0d6c 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -192,7 +192,7 @@ extern const struct scheduler sched_arinc653_def;
struct cpupool
{
int cpupool_id;
- cpumask_t cpu_valid; /* all cpus assigned to pool */
+ cpumask_var_t cpu_valid; /* all cpus assigned to pool */
struct cpupool *next;
unsigned int n_dom;
struct scheduler *sched;
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 4b9dc56cfe..3ba5495d11 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -664,7 +664,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op);
void schedule_dump(struct cpupool *c);
extern void dump_runq(unsigned char key);
-#define num_cpupool_cpus(c) (cpus_weight((c)->cpu_valid))
+#define num_cpupool_cpus(c) cpumask_weight((c)->cpu_valid)
#endif /* __SCHED_H__ */