author     Keir Fraser <keir@xen.org>  2010-12-24 08:31:24 +0000
committer  Keir Fraser <keir@xen.org>  2010-12-24 08:31:24 +0000
commit     bc24ee57573bc613e428fd3e1f3698807d1700de
tree       ee391286883c240bc3ad05622b707e736afdc9bb /xen/common/sched_credit2.c
parent     32a7d561586e48c52737f2ab66f9015f60616b77
credit2: Use loadavg to pick cpus, instead of instantaneous load
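The load compared here is credit2's per-runqueue b_avgload, a decaying
(exponentially weighted) average, rather than the instantaneous count of
runnable vcpus, so a momentary burst on one runqueue no longer steers
placement. As a rough sketch of the idea only -- the window shift, field
names, and update rule below are assumptions for illustration, not the
scheduler's actual update code -- a fixed-point decaying average can be
maintained like this:

    /* Minimal sketch of a fixed-point decaying load average.  All
     * names and the shift-based weighting are illustrative; credit2
     * keeps its own scaled averages, updated when queue load changes. */
    #include <stdint.h>

    typedef int64_t s_time_t;

    #define LOAD_WINDOW_SHIFT 10   /* hypothetical averaging window */
    #define LOAD_WINDOW       (1LL << LOAD_WINDOW_SHIFT)

    struct runq_load {
        s_time_t load;        /* instantaneous load: runnable vcpu count */
        s_time_t avgload;     /* decaying average, scaled by LOAD_WINDOW */
        s_time_t last_update; /* time of the previous update */
    };

    /* Fold the time spent at the current load level into the average:
     * avgload' = (dt * (load << shift) + (W - dt) * avgload) / W. */
    static void update_avgload(struct runq_load *r, s_time_t now)
    {
        s_time_t dt = now - r->last_update;

        if ( dt >= LOAD_WINDOW )
            /* A whole window has passed; the old average has decayed away. */
            r->avgload = r->load << LOAD_WINDOW_SHIFT;
        else
            r->avgload = ( dt * (r->load << LOAD_WINDOW_SHIFT)
                           + (LOAD_WINDOW - dt) * r->avgload )
                         >> LOAD_WINDOW_SHIFT;

        r->last_update = now;
    }

Because a burst moves the average only in proportion to how long it lasts,
decisions based on it are far less twitchy than ones based on raw queue
length.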
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Diffstat (limited to 'xen/common/sched_credit2.c')
-rw-r--r--  xen/common/sched_credit2.c  33
1 file changed, 23 insertions, 10 deletions
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index e1a9b07556..ffc937f943 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -996,13 +996,14 @@ csched_context_saved(const struct scheduler *ops, struct vcpu *vc)
     vcpu_schedule_unlock_irq(vc);
 }
 
-#define MAX_LOAD (1<<30);
+#define MAX_LOAD (1ULL<<60);
 static int
 choose_cpu(const struct scheduler *ops, struct vcpu *vc)
 {
     struct csched_private *prv = CSCHED_PRIV(ops);
-    int i, min_load, min_rqi = -1, new_cpu;
+    int i, min_rqi = -1, new_cpu;
     struct csched_vcpu *svc = CSCHED_VCPU(vc);
+    s_time_t min_avgload;
 
     BUG_ON(cpus_empty(prv->active_queues));
 
@@ -1053,27 +1054,39 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc)
 
     /* FIXME: Pay attention to cpu affinity */
 
-    min_load = MAX_LOAD;
+    min_avgload = MAX_LOAD;
 
     /* Find the runqueue with the lowest instantaneous load */
     for_each_cpu_mask(i, prv->active_queues)
     {
         struct csched_runqueue_data *rqd;
+        s_time_t rqd_avgload;
 
         rqd = prv->rqd + i;
 
         /* If checking a different runqueue, grab the lock,
-         * read the avg, and then release the lock. */
-        if ( rqd != svc->rqd
-             && ! spin_trylock(&rqd->lock) )
+         * read the avg, and then release the lock.
+         *
+         * If on our own runqueue, don't grab or release the lock;
+         * but subtract our own load from the runqueue load to simulate
+         * impartiality */
+        if ( rqd == svc->rqd )
+        {
+            rqd_avgload = rqd->b_avgload - svc->avgload;
+        }
+        else if ( spin_trylock(&rqd->lock) )
+        {
+            rqd_avgload = rqd->b_avgload;
+            spin_unlock(&rqd->lock);
+        }
+        else
             continue;
-        if ( prv->rqd[i].load < min_load )
+
+        if ( rqd_avgload < min_avgload )
        {
-            min_load=prv->rqd[i].load;
+            min_avgload = rqd_avgload;
             min_rqi=i;
        }
-        if ( rqd != svc->rqd )
-            spin_unlock(&rqd->lock);
     }
 
     /* We didn't find anyone (most likely because of spinlock contention); leave it where it is */
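
Two details of the new loop stand out.  MAX_LOAD grows from 1<<30 to
1ULL<<60 because min_avgload is now a 64-bit s_time_t holding a scaled
average rather than a raw queue length, so the old sentinel could be
smaller than a legitimate load.  And the locking is opportunistic: the
local runqueue is read without taking its lock (subtracting the vcpu's
own avgload so staying put gets no artificial advantage), remote
runqueues are sampled under spin_trylock(), and contended queues are
simply skipped.  A standalone sketch of that scan pattern, with pthread
mutexes standing in for Xen's spinlocks and all names hypothetical:

    /* Standalone sketch of the opportunistic scan in choose_cpu():
     * sample each runqueue's load average, skip queues whose lock is
     * contended, and discount our own contribution on the local queue. */
    #include <pthread.h>
    #include <stdint.h>

    #define NR_RUNQS 4
    #define MAX_LOAD (1LL << 60)

    struct runq {
        pthread_mutex_t lock;
        int64_t b_avgload;          /* decaying load average */
    };

    static struct runq runqs[NR_RUNQS];

    static void init_runqs(void)
    {
        for ( int i = 0; i < NR_RUNQS; i++ )
            pthread_mutex_init(&runqs[i].lock, NULL);
    }

    /* Return the index of the least-loaded runqueue, or -1 if every
     * candidate was contended (the caller leaves the vcpu where it is). */
    static int pick_runq(int local, int64_t my_avgload)
    {
        int64_t min_avgload = MAX_LOAD;
        int min_rqi = -1;

        for ( int i = 0; i < NR_RUNQS; i++ )
        {
            int64_t rqd_avgload;

            if ( i == local )
                /* Our own queue: no lock needed, but subtract our own
                 * load so the comparison with other queues is fair. */
                rqd_avgload = runqs[i].b_avgload - my_avgload;
            else if ( pthread_mutex_trylock(&runqs[i].lock) == 0 )
            {
                rqd_avgload = runqs[i].b_avgload;
                pthread_mutex_unlock(&runqs[i].lock);
            }
            else
                continue;           /* contended: don't spin, skip it */

            if ( rqd_avgload < min_avgload )
            {
                min_avgload = rqd_avgload;
                min_rqi = i;
            }
        }
        return min_rqi;
    }

Skipping a contended queue trades placement accuracy for never blocking
in this path, which is why the caller must tolerate finding no candidate,
exactly as the comment after the loop anticipates.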