diff options
author | Keir Fraser <keir@xen.org> | 2010-10-24 13:09:50 +0100 |
---|---|---|
committer | Keir Fraser <keir@xen.org> | 2010-10-24 13:09:50 +0100 |
commit | 7e6b926a17db1d63f1b8fd2745776f5369dacb4e (patch) | |
tree | 753bf45413056dc82c8d309d919a048149ff802b | |
parent | 8e5c88afb0fb552ba803fecc1952d01293442d7d (diff) | |
download | xen-7e6b926a17db1d63f1b8fd2745776f5369dacb4e.tar.gz xen-7e6b926a17db1d63f1b8fd2745776f5369dacb4e.tar.bz2 xen-7e6b926a17db1d63f1b8fd2745776f5369dacb4e.zip |
cpupools: Make interface more consistent
The current cpupool code interface is a bit inconsistent. This
patch addresses that by making the interaction for each
vcpu in a pool look like this:
alloc_vdata() -- allocates and sets up vcpu data
insert_vcpu() -- the vcpu is ready to run in this pool
remove_vcpu() -- take the vcpu out of the pool
free_vdata() -- delete allocated vcpu data
(Previously, remove_vcpu and free_vdata were combined into a "destroy
vcpu", and insert_vcpu was only called for idle vcpus.)
This also addresses a bug in credit2 which was caused by a
misunderstanding of the cpupools interface.
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Acked-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
-rw-r--r-- | xen/common/sched_credit.c | 30 | ||||
-rw-r--r-- | xen/common/sched_credit2.c | 29 | ||||
-rw-r--r-- | xen/common/sched_sedf.c | 7 | ||||
-rw-r--r-- | xen/common/schedule.c | 10 | ||||
-rw-r--r-- | xen/include/xen/sched-if.h | 3 |
5 files changed, 38 insertions, 41 deletions
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c index 64369c3462..aad47706a9 100644 --- a/xen/common/sched_credit.c +++ b/xen/common/sched_credit.c @@ -677,35 +677,35 @@ csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc) static void csched_free_vdata(const struct scheduler *ops, void *priv) { - struct csched_private *prv = CSCHED_PRIV(ops); struct csched_vcpu *svc = priv; - unsigned long flags; - - if ( __vcpu_on_runq(svc) ) - __runq_remove(svc); - - spin_lock_irqsave(&(prv->lock), flags); - if ( !list_empty(&svc->active_vcpu_elem) ) - __csched_vcpu_acct_stop_locked(prv, svc); - - spin_unlock_irqrestore(&(prv->lock), flags); + BUG_ON( !list_empty(&svc->runq_elem) ); xfree(svc); } static void -csched_vcpu_destroy(const struct scheduler *ops, struct vcpu *vc) +csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc) { + struct csched_private *prv = CSCHED_PRIV(ops); struct csched_vcpu * const svc = CSCHED_VCPU(vc); struct csched_dom * const sdom = svc->sdom; + unsigned long flags; CSCHED_STAT_CRANK(vcpu_destroy); + if ( __vcpu_on_runq(svc) ) + __runq_remove(svc); + + spin_lock_irqsave(&(prv->lock), flags); + + if ( !list_empty(&svc->active_vcpu_elem) ) + __csched_vcpu_acct_stop_locked(prv, svc); + + spin_unlock_irqrestore(&(prv->lock), flags); + BUG_ON( sdom == NULL ); BUG_ON( !list_empty(&svc->runq_elem) ); - - csched_free_vdata(ops, svc); } static void @@ -1561,7 +1561,7 @@ const struct scheduler sched_credit_def = { .destroy_domain = csched_dom_destroy, .insert_vcpu = csched_vcpu_insert, - .destroy_vcpu = csched_vcpu_destroy, + .remove_vcpu = csched_vcpu_remove, .sleep = csched_vcpu_sleep, .wake = csched_vcpu_wake, diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c index 6ccc39216a..0bf81c9d4c 100644 --- a/xen/common/sched_credit2.c +++ b/xen/common/sched_credit2.c @@ -592,7 +592,18 @@ static void csched_free_vdata(const struct scheduler *ops, void *priv) { struct csched_vcpu *svc = priv; - struct vcpu *vc 
= svc->vcpu; + + xfree(svc); +} + +static void +csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc) +{ + struct csched_vcpu * const svc = CSCHED_VCPU(vc); + struct csched_dom * const sdom = svc->sdom; + + BUG_ON( sdom == NULL ); + BUG_ON( !list_empty(&svc->runq_elem) ); if ( ! is_idle_vcpu(vc) ) { @@ -610,20 +621,6 @@ csched_free_vdata(const struct scheduler *ops, void *priv) svc->sdom->nr_vcpus--; } - - xfree(svc); -} - -static void -csched_vcpu_destroy(const struct scheduler *ops, struct vcpu *vc) -{ - struct csched_vcpu * const svc = CSCHED_VCPU(vc); - struct csched_dom * const sdom = svc->sdom; - - BUG_ON( sdom == NULL ); - BUG_ON( !list_empty(&svc->runq_elem) ); - - csched_free_vdata(ops, svc); } static void @@ -1199,7 +1196,7 @@ const struct scheduler sched_credit2_def = { .destroy_domain = csched_dom_destroy, .insert_vcpu = csched_vcpu_insert, - .destroy_vcpu = csched_vcpu_destroy, + .remove_vcpu = csched_vcpu_remove, .sleep = csched_vcpu_sleep, .wake = csched_vcpu_wake, diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c index be71432de9..7dde35b912 100644 --- a/xen/common/sched_sedf.c +++ b/xen/common/sched_sedf.c @@ -410,11 +410,6 @@ static void sedf_free_vdata(const struct scheduler *ops, void *priv) xfree(priv); } -static void sedf_destroy_vcpu(const struct scheduler *ops, struct vcpu *v) -{ - sedf_free_vdata(ops, v->sched_priv); -} - static void * sedf_alloc_domdata(const struct scheduler *ops, struct domain *d) { @@ -1504,8 +1499,6 @@ const struct scheduler sched_sedf_def = { .init_domain = sedf_init_domain, .destroy_domain = sedf_destroy_domain, - .destroy_vcpu = sedf_destroy_vcpu, - .alloc_vdata = sedf_alloc_vdata, .free_vdata = sedf_free_vdata, .alloc_pdata = sedf_alloc_pdata, diff --git a/xen/common/schedule.c b/xen/common/schedule.c index d8bc5f025f..fcf2cf4df3 100644 --- a/xen/common/schedule.c +++ b/xen/common/schedule.c @@ -219,6 +219,8 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor) if ( v->sched_priv == 
NULL ) return 1; + SCHED_OP(VCPU2OP(v), insert_vcpu, v); + return 0; } @@ -266,7 +268,8 @@ int sched_move_domain(struct domain *d, struct cpupool *c) migrate_timer(&v->singleshot_timer, new_p); migrate_timer(&v->poll_timer, new_p); - SCHED_OP(VCPU2OP(v), destroy_vcpu, v); + SCHED_OP(VCPU2OP(v), remove_vcpu, v); + SCHED_OP(VCPU2OP(v), free_vdata, v->sched_priv); cpus_setall(v->cpu_affinity); v->processor = new_p; @@ -274,6 +277,8 @@ int sched_move_domain(struct domain *d, struct cpupool *c) evtchn_move_pirqs(v); new_p = cycle_cpu(new_p, c->cpu_valid); + + SCHED_OP(VCPU2OP(v), insert_vcpu, v); } domain_update_node_affinity(d); @@ -295,7 +300,8 @@ void sched_destroy_vcpu(struct vcpu *v) kill_timer(&v->poll_timer); if ( test_and_clear_bool(v->is_urgent) ) atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count); - SCHED_OP(VCPU2OP(v), destroy_vcpu, v); + SCHED_OP(VCPU2OP(v), remove_vcpu, v); + SCHED_OP(VCPU2OP(v), free_vdata, v->sched_priv); } int sched_init_domain(struct domain *d) diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h index 3808344d11..3a7fbc4407 100644 --- a/xen/include/xen/sched-if.h +++ b/xen/include/xen/sched-if.h @@ -102,8 +102,9 @@ struct scheduler { int (*init_domain) (const struct scheduler *, struct domain *); void (*destroy_domain) (const struct scheduler *, struct domain *); + /* Activate / deactivate vcpus in a cpu pool */ void (*insert_vcpu) (const struct scheduler *, struct vcpu *); - void (*destroy_vcpu) (const struct scheduler *, struct vcpu *); + void (*remove_vcpu) (const struct scheduler *, struct vcpu *); void (*sleep) (const struct scheduler *, struct vcpu *); void (*wake) (const struct scheduler *, struct vcpu *); |