author     Keir Fraser <keir@xen.org>  2010-12-24 08:30:42 +0000
committer  Keir Fraser <keir@xen.org>  2010-12-24 08:30:42 +0000
commit     b423d447aa2c29b88d927363657963cbc44f9482 (patch)
tree       7a3d9ad7b2820021b73add279a7a430a9612a3b5 /xen/common/sched_credit2.c
parent     f467b7d7032d3ac18e13a43ced3074fa802dc56c (diff)
credit2: Track expected load
As vcpus are migrated, track how we expect the load to change. This helps
smooth migrations when the balancing doesn't take immediate effect on the
load average. In theory, if vcpu activity remains constant, then the
measured avgload should converge to the balanced avgload.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
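
For context, below is a minimal standalone sketch of the arithmetic this patch
extends. The demo_* names, the fixed window shift of 18, and the use of plain
int64_t in place of s_time_t are illustrative assumptions, not code from the
Xen tree: the point is that the same exponentially decaying average is kept
twice, and only b_avgload is adjusted immediately when a vcpu is assigned to
or removed from a runqueue, so the measured avgload can converge toward it as
the migrated vcpu's activity shows up in the instantaneous load.

#include <stdint.h>

#define DEMO_LOAD_WINDOW_SHIFT 18   /* assumed averaging window, in shift bits */

struct demo_runq {
    int     load;         /* instantaneous load (runnable, non-idle vcpus) */
    int64_t last_update;  /* time of the last average update */
    int64_t avgload;      /* measured decaying load average */
    int64_t b_avgload;    /* load average adjusted by balancing decisions */
};

/* Fold the time elapsed since the last update into both decaying averages. */
static void demo_update_runq_load(struct demo_runq *rq, int64_t now, int change)
{
    int64_t window = 1LL << DEMO_LOAD_WINDOW_SHIFT;
    int64_t scaled = (int64_t)rq->load << DEMO_LOAD_WINDOW_SHIFT;

    if ( rq->last_update + window < now )
    {
        /* A whole window has elapsed: both averages snap to the current load. */
        rq->avgload = scaled;
        rq->b_avgload = scaled;
    }
    else
    {
        int64_t delta = now - rq->last_update;

        /* Weighted mix of the current load and the previous average. */
        rq->avgload   = (delta * scaled + (window - delta) * rq->avgload)
                        >> DEMO_LOAD_WINDOW_SHIFT;
        rq->b_avgload = (delta * scaled + (window - delta) * rq->b_avgload)
                        >> DEMO_LOAD_WINDOW_SHIFT;
    }

    rq->load += change;
    rq->last_update = now;
}

/* Migrating a vcpu nudges only b_avgload, by that vcpu's own average load. */
static void demo_runq_assign(struct demo_runq *rq, int64_t vcpu_avgload)
{
    rq->b_avgload += vcpu_avgload;
}

static void demo_runq_deassign(struct demo_runq *rq, int64_t vcpu_avgload)
{
    rq->b_avgload -= vcpu_avgload;
}
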
Diffstat (limited to 'xen/common/sched_credit2.c')
-rw-r--r--  xen/common/sched_credit2.c  15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 58f146cc99..6d79a72893 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -206,6 +206,7 @@ struct csched_runqueue_data {
int load; /* Instantaneous load: Length of queue + num non-idle threads */
s_time_t load_last_update; /* Last time average was updated */
s_time_t avgload; /* Decaying queue load */
+ s_time_t b_avgload; /* Decaying queue load modified by balancing */
};
/*
@@ -302,6 +303,7 @@ __update_runq_load(const struct scheduler *ops,
if ( rqd->load_last_update + (1ULL<<prv->load_window_shift) < now )
{
rqd->avgload = (unsigned long long)rqd->load << prv->load_window_shift;
+ rqd->b_avgload = (unsigned long long)rqd->load << prv->load_window_shift;
}
else
{
@@ -310,6 +312,10 @@ __update_runq_load(const struct scheduler *ops,
rqd->avgload =
( ( delta * ( (unsigned long long)rqd->load << prv->load_window_shift ) )
+ ( ((1ULL<<prv->load_window_shift) - delta) * rqd->avgload ) ) >> prv->load_window_shift;
+
+ rqd->b_avgload =
+ ( ( delta * ( (unsigned long long)rqd->load << prv->load_window_shift ) )
+ + ( ((1ULL<<prv->load_window_shift) - delta) * rqd->b_avgload ) ) >> prv->load_window_shift;
}
rqd->load += change;
rqd->load_last_update = now;
@@ -317,11 +323,12 @@ __update_runq_load(const struct scheduler *ops,
{
struct {
unsigned rq_load:4, rq_avgload:28;
- unsigned rq_id:4;
+ unsigned rq_id:4, b_avgload:28;
} d;
d.rq_id=rqd->id;
d.rq_load = rqd->load;
d.rq_avgload = rqd->avgload;
+ d.b_avgload = rqd->b_avgload;
trace_var(TRC_CSCHED2_UPDATE_RUNQ_LOAD, 1,
sizeof(d),
(unsigned char *)&d);
@@ -756,6 +763,9 @@ __runq_assign(struct csched_vcpu *svc, struct csched_runqueue_data *rqd)
update_max_weight(svc->rqd, svc->weight, 0);
+ /* Expected new load based on adding this vcpu */
+ rqd->b_avgload += svc->avgload;
+
/* TRACE */
{
struct {
@@ -790,6 +800,9 @@ __runq_deassign(struct csched_vcpu *svc)
list_del_init(&svc->rqd_elem);
update_max_weight(svc->rqd, 0, svc->weight);
+ /* Expected new load based on removing this vcpu */
+ svc->rqd->b_avgload -= svc->avgload;
+
svc->rqd = NULL;
}