aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorHui Lv <hui.lv@intel.com>2012-03-07 09:34:26 +0000
committerHui Lv <hui.lv@intel.com>2012-03-07 09:34:26 +0000
commit494939f0b0d6941f0982e9ff50e73761ce3890c9 (patch)
tree953be2a17d4b8b9926dd3725b92150149ee72c23
parent0acd2b547df09222cd7d3c0c7aad2ac178c61130 (diff)
downloadxen-494939f0b0d6941f0982e9ff50e73761ce3890c9.tar.gz
xen-494939f0b0d6941f0982e9ff50e73761ce3890c9.tar.bz2
xen-494939f0b0d6941f0982e9ff50e73761ce3890c9.zip
sched_credit: Use delay to control scheduling frequency
This patch can improve Xen performance: 1. Basically, the "delay method" can achieve an 11% overall performance boost for SPECvirt compared to the original credit scheduler. 2. We have tried 1ms delay and 10ms delay; there is no big difference between these two configurations. (1ms is enough to achieve good performance) 3. We have compared response time/latency at different load levels (low, high, peak); the "delay method" didn't bring very much response time increase. 4. 1ms delay can reduce context switches by 30% at peak performance, which produces the benefits. (int sched_ratelimit_us = 1000 is the recommended setting) Signed-off-by: Hui Lv <hui.lv@intel.com> Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com> Acked-by: George Dunlap <george.dunlap@eu.citrix.com> Committed-by: Keir Fraser <keir@xen.org> xen-unstable changeset: 24518:44c2856b1952 xen-unstable date: Tue Jan 17 11:18:48 2012 +0000
-rw-r--r--xen/common/sched_credit.c47
-rw-r--r--xen/common/schedule.c5
-rw-r--r--xen/include/xen/perfc_defn.h1
-rw-r--r--xen/include/xen/sched-if.h5
4 files changed, 57 insertions, 1 deletions
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index b3d4f11391..e7a3fef6ea 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -172,6 +172,7 @@ struct csched_private {
uint32_t credit;
int credit_balance;
uint32_t runq_sort;
+ unsigned ratelimit_us;
/* Period of master and tick in milliseconds */
unsigned tslice_ms, tick_period_us, ticks_per_tslice;
unsigned credits_per_tslice;
@@ -1298,10 +1299,15 @@ csched_schedule(
struct csched_private *prv = CSCHED_PRIV(ops);
struct csched_vcpu *snext;
struct task_slice ret;
+ s_time_t runtime, tslice;
CSCHED_STAT_CRANK(schedule);
CSCHED_VCPU_CHECK(current);
+ runtime = now - current->runstate.state_entry_time;
+ if ( runtime < 0 ) /* Does this ever happen? */
+ runtime = 0;
+
if ( !is_idle_vcpu(scurr->vcpu) )
{
/* Update credits of a non-idle VCPU. */
@@ -1314,6 +1320,35 @@ csched_schedule(
scurr->pri = CSCHED_PRI_IDLE;
}
+ /* Choices, choices:
+ * - If we have a tasklet, we need to run the idle vcpu no matter what.
+ * - If sched rate limiting is in effect, and the current vcpu has
+ * run for less than that amount of time, continue the current one,
+ * but with a shorter timeslice and return it immediately
+ * - Otherwise, choose the one with the highest priority (which may
+ * be the one currently running)
+ * - If the currently running one is TS_OVER, see if there
+ * is a higher priority one waiting on the runqueue of another
+ * cpu and steal it.
+ */
+
+ /* If we have schedule rate limiting enabled, check to see
+ * how long we've run for. */
+ if ( !tasklet_work_scheduled
+ && prv->ratelimit_us
+ && vcpu_runnable(current)
+ && !is_idle_vcpu(current)
+ && runtime < MICROSECS(prv->ratelimit_us) )
+ {
+ snext = scurr;
+ snext->start_time += now;
+ perfc_incr(delay_ms);
+ tslice = MICROSECS(prv->ratelimit_us);
+ ret.migrated = 0;
+ goto out;
+ }
+ tslice = MILLISECS(prv->tslice_ms);
+
/*
* Select next runnable local VCPU (ie top of local runq)
*/
@@ -1368,11 +1403,12 @@ csched_schedule(
if ( !is_idle_vcpu(snext->vcpu) )
snext->start_time += now;
+out:
/*
* Return task to run next...
*/
ret.time = (is_idle_vcpu(snext->vcpu) ?
- -1 : MILLISECS(prv->tslice_ms));
+ -1 : tslice);
ret.task = snext->vcpu;
CSCHED_VCPU_CHECK(ret.task);
@@ -1522,6 +1558,15 @@ csched_init(struct scheduler *ops)
prv->tick_period_us = prv->tslice_ms * 1000 / prv->ticks_per_tslice;
prv->credits_per_tslice = CSCHED_CREDITS_PER_MSEC * prv->tslice_ms;
+ if ( MICROSECS(sched_ratelimit_us) > MILLISECS(sched_credit_tslice_ms) )
+ {
printk("WARNING: sched_ratelimit_us > "
"sched_credit_tslice_ms is undefined\n"
+ "Setting ratelimit_us to 1000 * tslice_ms\n");
+ prv->ratelimit_us = 1000 * prv->tslice_ms;
+ }
+ else
+ prv->ratelimit_us = sched_ratelimit_us;
return 0;
}
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index bbdba0163e..907ea09a76 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -47,6 +47,11 @@ string_param("sched", opt_sched);
bool_t sched_smt_power_savings = 0;
boolean_param("sched_smt_power_savings", sched_smt_power_savings);
+/* Default scheduling rate limit: 1ms
+ * The behavior when sched_ratelimit_us is greater than sched_credit_tslice_ms is undefined
+ * */
+int sched_ratelimit_us = 1000;
+integer_param("sched_ratelimit_us", sched_ratelimit_us);
/* Various timer handlers. */
static void s_timer_fn(void *unused);
static void vcpu_periodic_timer_fn(void *data);
diff --git a/xen/include/xen/perfc_defn.h b/xen/include/xen/perfc_defn.h
index d166e2269b..0aef235f9d 100644
--- a/xen/include/xen/perfc_defn.h
+++ b/xen/include/xen/perfc_defn.h
@@ -16,6 +16,7 @@ PERFCOUNTER(sched_irq, "sched: timer")
PERFCOUNTER(sched_run, "sched: runs through scheduler")
PERFCOUNTER(sched_ctx, "sched: context switches")
+PERFCOUNTER(delay_ms, "csched: delay")
PERFCOUNTER(vcpu_check, "csched: vcpu_check")
PERFCOUNTER(schedule, "csched: schedule")
PERFCOUNTER(acct_run, "csched: acct_run")
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 702910b1b2..160ba425bc 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -16,6 +16,11 @@ extern struct cpupool *cpupool0;
/* cpus currently in no cpupool */
extern cpumask_t cpupool_free_cpus;
+/* Scheduler generic parameters
+ * */
+extern int sched_ratelimit_us;
+
+
/*
* In order to allow a scheduler to remap the lock->cpu mapping,
* we have a per-cpu pointer, along with a pre-allocated set of