diff options
author | Keir Fraser <keir@xen.org> | 2010-12-24 08:31:04 +0000 |
---|---|---|
committer | Keir Fraser <keir@xen.org> | 2010-12-24 08:31:04 +0000 |
commit | 32a7d561586e48c52737f2ab66f9015f60616b77 (patch) | |
tree | 89735c63a5b832b5e45c05598388f88955a4fd29 /xen/common/sched_credit2.c | |
parent | b423d447aa2c29b88d927363657963cbc44f9482 (diff) | |
download | xen-32a7d561586e48c52737f2ab66f9015f60616b77.tar.gz xen-32a7d561586e48c52737f2ab66f9015f60616b77.tar.bz2 xen-32a7d561586e48c52737f2ab66f9015f60616b77.zip |
credit2: Migrate request infrastructure
Put in infrastructure to allow a vcpu to request to migrate to a
specific runqueue. This will allow a load balancer to choose running
VMs to migrate, and know they will go where expected when the VM is
descheduled.
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Diffstat (limited to 'xen/common/sched_credit2.c')
-rw-r--r-- | xen/common/sched_credit2.c | 38 |
1 files changed, 35 insertions, 3 deletions
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c index 6d79a72893..e1a9b07556 100644 --- a/xen/common/sched_credit2.c +++ b/xen/common/sched_credit2.c @@ -157,6 +157,12 @@ */ #define __CSFLAG_delayed_runq_add 2 #define CSFLAG_delayed_runq_add (1<<__CSFLAG_delayed_runq_add) +/* CSFLAG_runq_migrate_request: This vcpu is being migrated as a result of a + * credit2-initiated runq migrate request; migrate it to the runqueue indicated + * in the svc struct. + */ +#define __CSFLAG_runq_migrate_request 3 +#define CSFLAG_runq_migrate_request (1<<__CSFLAG_runq_migrate_request) int opt_migrate_resist=500; @@ -247,6 +253,8 @@ struct csched_vcpu { /* Individual contribution to load */ s_time_t load_last_update; /* Last time average was updated */ s_time_t avgload; /* Decaying queue load */ + + struct csched_runqueue_data *migrate_rqd; /* Pre-determined rqd to which to migrate */ }; /* @@ -974,10 +982,10 @@ csched_context_saved(const struct scheduler *ops, struct vcpu *vc) * it seems a bit pointless; especially as we have plenty of * bits free. */ - if ( test_bit(__CSFLAG_delayed_runq_add, &svc->flags) ) + if ( test_and_clear_bit(__CSFLAG_delayed_runq_add, &svc->flags) + && likely(vcpu_runnable(vc)) ) { BUG_ON(__vcpu_on_runq(svc)); - clear_bit(__CSFLAG_delayed_runq_add, &svc->flags); runq_insert(ops, vc->processor, svc); runq_tickle(ops, vc->processor, svc, now); @@ -1015,11 +1023,34 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc) if ( !spin_trylock(&prv->lock) ) { + if ( test_and_clear_bit(__CSFLAG_runq_migrate_request, &svc->flags) ) + { + d2printk("d%dv%d -\n", svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id); + clear_bit(__CSFLAG_runq_migrate_request, &svc->flags); + } /* Leave it where it is for now. When we actually pay attention * to affinity we'll have to figure something out... */ return vc->processor; } + /* First check to see if we're here because someone else suggested a place + * for us to move. 
*/ + if ( test_and_clear_bit(__CSFLAG_runq_migrate_request, &svc->flags) ) + { + if ( unlikely(svc->migrate_rqd->id < 0) ) + { + printk("%s: Runqueue migrate aborted because target runqueue disappeared!\n", + __func__); + /* Fall-through to normal cpu pick */ + } + else + { + d2printk("d%dv%d +\n", svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id); + new_cpu = first_cpu(svc->migrate_rqd->active); + goto out_up; + } + } + /* FIXME: Pay attention to cpu affinity */ min_load = MAX_LOAD; @@ -1053,7 +1084,8 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc) BUG_ON(cpus_empty(prv->rqd[min_rqi].active)); new_cpu = first_cpu(prv->rqd[min_rqi].active); } - + +out_up: spin_unlock(&prv->lock); return new_cpu; |