author     Keir Fraser <keir.fraser@citrix.com>   2010-09-20 18:49:15 +0100
committer  Keir Fraser <keir.fraser@citrix.com>   2010-09-20 18:49:15 +0100
commit     6d6b9b99aa0b6df81ffd8ae7e6d99e0aca8aa645 (patch)
tree       00c48a3ecbe21674947bf723acf215d68ce56ff4 /xen/common/sched_credit.c
parent     744d6961f91581f603c0f285f52c4b23fa30ad4c (diff)
sched_credit: Raise bar for inter-socket migrations on mostly-idle systems
The credit scheduler tries to keep work balanced, even on a mostly idle
system. Unfortunately, if you have one VM burning cpu and another VM idle,
the effect is that the busy VM will flip back and forth between sockets.

This patch addresses this by migrating to a different socket only if the
number of idle processors there is more than twice that of the socket the
vcpu is currently on. This will only affect mostly-idle systems; as the
system becomes more busy, other load-balancing code will come into effect.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
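[Editor's note: for illustration only, the decision the diff below implements
can be restated as a standalone predicate. This is a sketch, not part of the
patch; should_migrate() and its parameter names are hypothetical stand-ins
for weight_cpu, weight_nxt and migrate_factor in the actual code.]

/* Illustrative sketch only (not Xen code): should_migrate() restates the
 * predicate introduced by the diff below.  idlers_here/idlers_there stand
 * in for weight_cpu/weight_nxt; migrate_factor is 1 when comparing threads
 * on the same socket and 2 when comparing cores across sockets. */
#include <stdbool.h>

static bool should_migrate(bool smt_power_savings,
                           int idlers_here, int idlers_there,
                           int migrate_factor)
{
    if ( smt_power_savings )
        /* Power-saving mode consolidates work: move only toward the
         * candidate with strictly fewer idlers. */
        return idlers_here > idlers_there;

    /* Default mode spreads work: the candidate must have more than
     * migrate_factor times as many idlers as the current location. */
    return idlers_here * migrate_factor < idlers_there;
}

For example, should_migrate(false, 3, 5, 2) is false (3 * 2 = 6, and 5 < 6
fails), so a busy vcpu on a socket with 3 idlers stays put unless some other
socket has at least 7; within a socket (factor 1), a single extra idle
thread is still enough to justify a move.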
Diffstat (limited to 'xen/common/sched_credit.c')
 xen/common/sched_credit.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 2a7e299f11..64369c3462 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -499,26 +499,36 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
         cpumask_t cpu_idlers;
         cpumask_t nxt_idlers;
         int nxt, weight_cpu, weight_nxt;
+        int migrate_factor;
 
         nxt = cycle_cpu(cpu, cpus);
 
         if ( cpu_isset(cpu, per_cpu(cpu_core_map, nxt)) )
         {
+            /* We're on the same socket, so check the busy-ness of threads.
+             * Migrate if # of idlers is less at all */
             ASSERT( cpu_isset(nxt, per_cpu(cpu_core_map, cpu)) );
+            migrate_factor = 1;
             cpus_and(cpu_idlers, idlers, per_cpu(cpu_sibling_map, cpu));
             cpus_and(nxt_idlers, idlers, per_cpu(cpu_sibling_map, nxt));
         }
         else
         {
+            /* We're on different sockets, so check the busy-ness of cores.
+             * Migrate only if the other core is twice as idle */
             ASSERT( !cpu_isset(nxt, per_cpu(cpu_core_map, cpu)) );
+            migrate_factor = 2;
             cpus_and(cpu_idlers, idlers, per_cpu(cpu_core_map, cpu));
             cpus_and(nxt_idlers, idlers, per_cpu(cpu_core_map, nxt));
         }
 
         weight_cpu = cpus_weight(cpu_idlers);
         weight_nxt = cpus_weight(nxt_idlers);
-        if ( ( (weight_cpu < weight_nxt) ^ sched_smt_power_savings )
-             && (weight_cpu != weight_nxt) )
+        /* smt_power_savings: consolidate work rather than spreading it */
+        if ( ( sched_smt_power_savings
+               && (weight_cpu > weight_nxt) )
+             || ( !sched_smt_power_savings
+                  && (weight_cpu * migrate_factor < weight_nxt) ) )
         {
             cpu = cycle_cpu(CSCHED_PCPU(nxt)->idle_bias, nxt_idlers);
             if ( commit )