author     George Dunlap <george.dunlap@eu.citrix.com>    2012-04-10 10:42:35 +0100
committer  George Dunlap <george.dunlap@eu.citrix.com>    2012-04-10 10:42:35 +0100
commit     dbfa7bba0f213b1802e1900b71bc34837c30ee52 (patch)
tree       611b2cca35a792dfe9da22ae1bb39935414dbd30 /xen/common/schedule.c
parent     7d302763ac7b0ecefb437ea45ee5b8d27a20d016 (diff)
xen, cpupools: Fix cpupool-move to make it more consistent
The full order for creating new private data structures when moving
from one pool to another is now:

* Allocate all new structures
  - Allocate a new private domain structure (but don't point there yet)
  - Allocate per-vcpu data structures (but don't point there yet)
* Remove old structures
  - Remove each vcpu, freeing the associated data structure
  - Free the domain data structure
* Switch to the new structures
  - Set the domain to the new cpupool, with the new private domain structure
  - Set each vcpu to the respective new structure, and insert

This is in line with a (fairly reasonable) assumption in credit2 that
the private structure of the domain will be the private structure
pointed to by the per-vcpu private structure.

Also fix a bug, in which insert_vcpu was called with the *old* vcpu
ops rather than the new ones.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Committed-by: Keir Fraser <keir@xen.org>
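For reference, below is a condensed sketch of how the resulting
sched_move_domain() reads once this patch is applied. Only the hunks
in the diff further down are confirmed by this commit; the allocation
phase at the top (the alloc_domdata/alloc_vdata hooks and the
xzalloc_array'd vcpu_priv array) and the elided error handling are
reconstructed from the description above and should be treated as
illustrative, not as the exact source:

    /* Condensed sketch of sched_move_domain() after this patch.
     * The allocation phase and error handling are inferred from the
     * commit message, not shown in the diff below.  Timer migration
     * (migrate_timer) is elided from the loop for brevity. */
    int sched_move_domain(struct domain *d, struct cpupool *c)
    {
        struct vcpu *v;
        unsigned int new_p;
        void **vcpu_priv;  /* new per-vcpu private data, indexed by vcpu_id */
        void *domdata;     /* new per-domain private data */

        /* 1. Allocate all new structures; nothing points at them yet,
         *    so the old scheduler still sees fully consistent state. */
        domdata = SCHED_OP(c->sched, alloc_domdata, d);
        vcpu_priv = xzalloc_array(void *, d->max_vcpus);
        for_each_vcpu ( d, v )
            vcpu_priv[v->vcpu_id] = SCHED_OP(c->sched, alloc_vdata, v, domdata);

        domain_pause(d);

        /* 2. Remove the old structures, via the *old* pool's ops. */
        for_each_vcpu ( d, v )
        {
            SCHED_OP(VCPU2OP(v), remove_vcpu, v);
            SCHED_OP(VCPU2OP(v), free_vdata, v->sched_priv);
            v->sched_priv = NULL;
        }
        SCHED_OP(DOM2OP(d), free_domdata, d->sched_priv);

        /* 3. Switch to the new structures: the domain first, so that
         *    each vcpu's private data points into the new domain data
         *    by the time insert_vcpu runs (the credit2 assumption). */
        d->cpupool = c;
        d->sched_priv = domdata;

        new_p = cpumask_first(c->cpu_valid);
        for_each_vcpu ( d, v )
        {
            cpumask_setall(v->cpu_affinity);
            v->processor = new_p;
            v->sched_priv = vcpu_priv[v->vcpu_id];
            new_p = cpumask_cycle(new_p, c->cpu_valid);
            SCHED_OP(c->sched, insert_vcpu, v);  /* new ops, not VCPU2OP(v) */
        }

        domain_update_node_affinity(d);
        domain_unpause(d);
        xfree(vcpu_priv);
        return 0;
    }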
Diffstat (limited to 'xen/common/schedule.c')
-rw-r--r--  xen/common/schedule.c | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 724e8fa1ab..0854f55bf5 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -261,6 +261,18 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
domain_pause(d);
+ for_each_vcpu ( d, v )
+ {
+ SCHED_OP(VCPU2OP(v), remove_vcpu, v);
+ SCHED_OP(VCPU2OP(v), free_vdata, v->sched_priv);
+ v->sched_priv = NULL;
+ }
+
+ SCHED_OP(DOM2OP(d), free_domdata, d->sched_priv);
+
+ d->cpupool = c;
+ d->sched_priv = domdata;
+
new_p = cpumask_first(c->cpu_valid);
for_each_vcpu ( d, v )
{
@@ -268,9 +280,6 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
migrate_timer(&v->singleshot_timer, new_p);
migrate_timer(&v->poll_timer, new_p);
- SCHED_OP(VCPU2OP(v), remove_vcpu, v);
- SCHED_OP(VCPU2OP(v), free_vdata, v->sched_priv);
-
cpumask_setall(v->cpu_affinity);
v->processor = new_p;
v->sched_priv = vcpu_priv[v->vcpu_id];
@@ -278,13 +287,9 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
new_p = cpumask_cycle(new_p, c->cpu_valid);
- SCHED_OP(VCPU2OP(v), insert_vcpu, v);
+ SCHED_OP(c->sched, insert_vcpu, v);
}
- d->cpupool = c;
- SCHED_OP(DOM2OP(d), free_domdata, d->sched_priv);
- d->sched_priv = domdata;
-
domain_update_node_affinity(d);
domain_unpause(d);
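For context on the insert_vcpu fix at the end of the loop: schedule.c
resolves a vcpu's scheduler ops through its domain's cpupool, roughly
as follows (definitions recalled from schedule.c of this era; treat
the exact spellings as illustrative):

    /* Illustrative: mapping a domain/vcpu to its scheduler ops.  The
     * idle domain has no cpupool and falls back to the default ops. */
    #define DOM2OP(_d)   (((_d)->cpupool == NULL) ? &ops : ((_d)->cpupool->sched))
    #define VCPU2OP(_v)  (DOM2OP((_v)->domain))

In the old ordering, d->cpupool was only updated after the per-vcpu
loop, so SCHED_OP(VCPU2OP(v), insert_vcpu, v) dispatched into the
*old* pool's scheduler while v->sched_priv already pointed at the new
pool's per-vcpu data. The patch both moves the d->cpupool update ahead
of the loop and calls insert_vcpu explicitly through c->sched, so the
dispatch no longer depends on that ordering.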