diff options
author | Keir Fraser <keir.fraser@citrix.com> | 2010-08-09 16:36:07 +0100 |
---|---|---|
committer | Keir Fraser <keir.fraser@citrix.com> | 2010-08-09 16:36:07 +0100 |
commit | 5fc9f5ef6c083641b55ddcab8cb6a041656fb3bd (patch) | |
tree | eed54fa9af843194907ba1dd1cb67bda2118a564 /xen/common/sched_credit.c | |
parent | 49094cac1b418fef528c3d3991230eccb4cdd513 (diff) | |
download | xen-5fc9f5ef6c083641b55ddcab8cb6a041656fb3bd.tar.gz xen-5fc9f5ef6c083641b55ddcab8cb6a041656fb3bd.tar.bz2 xen-5fc9f5ef6c083641b55ddcab8cb6a041656fb3bd.zip |
scheduler: Implement yield for credit1
This patch implements 'yield' for credit1. It does this by attempting
to put yielding vcpu behind a single lower-priority vcpu on the
runqueue. If no lower-priority vcpus are in the queue, it will go at
the back (which, if the queue is empty, will also be the front).
Runqueues are sorted every 30ms, so that is the longest this priority
inversion can last.
For workloads with heavy concurrency hazard, and guests which implement
yield-on-spinlock, this patch significantly increases performance and
total system throughput.
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Diffstat (limited to 'xen/common/sched_credit.c')
-rw-r--r-- | xen/common/sched_credit.c | 40 |
1 files changed, 39 insertions, 1 deletions
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c index 356020e095..c2c0f2bf2f 100644 --- a/xen/common/sched_credit.c +++ b/xen/common/sched_credit.c @@ -64,7 +64,8 @@ /* * Flags */ -#define CSCHED_FLAG_VCPU_PARKED 0x0001 /* VCPU over capped credits */ +#define CSCHED_FLAG_VCPU_PARKED 0x0001 /* VCPU over capped credits */ +#define CSCHED_FLAG_VCPU_YIELD 0x0002 /* VCPU yielding */ /* @@ -108,6 +109,12 @@ /* + * Boot parameters + */ +int sched_credit_default_yield = 0; +boolean_param("sched_credit_default_yield", sched_credit_default_yield); + +/* * Physical CPU */ struct csched_pcpu { @@ -202,6 +209,18 @@ __runq_insert(unsigned int cpu, struct csched_vcpu *svc) break; } + /* If the vcpu yielded, try to put it behind one lower-priority + * runnable vcpu if we can. The next runq_sort will bring it forward + * within 30ms if the queue too long. */ + if ( svc->flags & CSCHED_FLAG_VCPU_YIELD + && __runq_elem(iter)->pri > CSCHED_PRI_IDLE ) + { + iter=iter->next; + + /* Some sanity checks */ + BUG_ON(iter == runq); + } + list_add_tail(&svc->runq_elem, iter); } @@ -748,6 +767,18 @@ csched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) __runq_tickle(cpu, svc); } +static void +csched_vcpu_yield(const struct scheduler *ops, struct vcpu *vc) +{ + struct csched_vcpu * const sv = CSCHED_VCPU(vc); + + if ( !sched_credit_default_yield ) + { + /* Let the scheduler know that this vcpu is trying to yield */ + sv->flags |= CSCHED_FLAG_VCPU_YIELD; + } +} + static int csched_dom_cntl( const struct scheduler *ops, @@ -1282,6 +1313,12 @@ csched_schedule( } /* + * Clear YIELD flag before scheduling out + */ + if ( scurr->flags & CSCHED_FLAG_VCPU_YIELD ) + scurr->flags &= ~(CSCHED_FLAG_VCPU_YIELD); + + /* * SMP Load balance: * * If the next highest priority local runnable VCPU has already eaten @@ -1509,6 +1546,7 @@ const struct scheduler sched_credit_def = { .sleep = csched_vcpu_sleep, .wake = csched_vcpu_wake, + .yield = csched_vcpu_yield, .adjust = 
csched_dom_cntl, |