author     Keir Fraser <keir.fraser@citrix.com>  2010-06-17 07:22:06 +0100
committer  Keir Fraser <keir.fraser@citrix.com>  2010-06-17 07:22:06 +0100
commit     38b998cfb6e8b572a265bb6ba400a1db700e5882
tree       48c0ab213ece61a89dffabc452fbdd3285506238 /xen/common/sched_credit.c
parent     56d6b7ac5dedb2413b22713e3760a617a3d5e260
x86: IRQ affinity should track vCPU affinity
With IRQs bound to whichever pCPU the binding vCPU happens to be
running on, considerable cross-CPU traffic can result as soon as that
vCPU moves to a different pCPU. Likewise, when a domain re-binds an
event channel associated with a pIRQ, that IRQ's affinity should be
adjusted accordingly.
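To make the intended behaviour concrete, here is a minimal,
self-contained sketch of the idea, not the actual Xen implementation;
all names in it (hv_vcpu, hv_pirq, irq_set_affinity, and so on) are
illustrative assumptions:

#include <stddef.h>

typedef unsigned long cpumask_sketch_t;   /* one bit per pCPU */

struct hv_pirq {
    int irq;                              /* physical IRQ number */
    cpumask_sketch_t affinity;            /* current affinity mask */
    struct hv_pirq *next;
};

struct hv_vcpu {
    unsigned int processor;               /* pCPU currently hosting this vCPU */
    struct hv_pirq *pirq_list;            /* pIRQs bound here via event channels */
};

/* Stand-in for the hypervisor's real affinity-programming routine,
 * which would reprogram the IO-APIC/MSI entry. */
static void irq_set_affinity(struct hv_pirq *p, cpumask_sketch_t mask)
{
    p->affinity = mask;
}

/* On vCPU migration, make each bound pIRQ's affinity track the vCPU. */
static void vcpu_migrate_irqs(struct hv_vcpu *v, unsigned int new_pcpu)
{
    struct hv_pirq *p;

    v->processor = new_pcpu;
    for ( p = v->pirq_list; p != NULL; p = p->next )
        irq_set_affinity(p, 1ul << new_pcpu);
}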
An open issue is how to break ties for interrupts shared by multiple
domains; currently, the most recent request (at any given point in
time) is honored.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
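For illustration only, the tie-break described above amounts to an
unconditional overwrite; a hypothetical sketch (shared_irq and
shared_irq_rebind are made-up names):

struct shared_irq {
    unsigned long affinity_mask;   /* one bit per pCPU */
};

/* "Last request wins": each (re)bind from any of the sharing domains
 * simply overwrites whatever an earlier domain asked for. */
static void shared_irq_rebind(struct shared_irq *s, unsigned long mask)
{
    s->affinity_mask = mask;       /* no arbitration between domains */
}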
Diffstat (limited to 'xen/common/sched_credit.c')
 xen/common/sched_credit.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 0aeae90cb7..356020e095 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1168,7 +1168,7 @@ csched_runq_steal(int peer_cpu, int cpu, int pri)
 
 static struct csched_vcpu *
 csched_load_balance(struct csched_private *prv, int cpu,
-    struct csched_vcpu *snext)
+    struct csched_vcpu *snext, bool_t *stolen)
 {
     struct csched_vcpu *speer;
     cpumask_t workers;
@@ -1221,7 +1221,10 @@ csched_load_balance(struct csched_private *prv, int cpu,
         speer = csched_runq_steal(peer_cpu, cpu, snext->pri);
         spin_unlock(per_cpu(schedule_data, peer_cpu).schedule_lock);
         if ( speer != NULL )
+        {
+            *stolen = 1;
             return speer;
+        }
     }
 
  out:
@@ -1269,6 +1272,7 @@ csched_schedule(
     BUG_ON( is_idle_vcpu(current) || list_empty(runq) );
 
     snext = __runq_elem(runq->next);
+    ret.migrated = 0;
 
     /* Tasklet work (which runs in idle VCPU context) overrides all else. */
     if ( tasklet_work_scheduled )
@@ -1288,7 +1292,7 @@
     if ( snext->pri > CSCHED_PRI_TS_OVER )
         __runq_remove(snext);
     else
-        snext = csched_load_balance(prv, cpu, snext);
+        snext = csched_load_balance(prv, cpu, snext, &ret.migrated);
 
     /*
      * Update idlers mask if necessary. When we're idling, other CPUs
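The sched_credit.c part of the change threads a boolean out-parameter
through the load balancer so csched_schedule() can report, via
ret.migrated, whether the vCPU it is about to run was just stolen from
a peer pCPU. Below is a self-contained sketch of that pattern with
hypothetical names (struct task, load_balance), assuming the caller
treats a stolen task as having migrated:

#include <stdbool.h>
#include <stdio.h>

struct task { int id; };

/* Balancer sketch: "steals" peer work when some is available, and
 * reports the theft through the out-parameter, as the diff above does
 * with *stolen. */
static struct task *load_balance(struct task *snext, struct task *peer_work,
                                 bool *stolen)
{
    if ( peer_work != NULL )
    {
        *stolen = true;              /* mirrors "*stolen = 1;" */
        return peer_work;
    }
    return snext;                    /* nothing to steal; keep local choice */
}

int main(void)
{
    struct task local = { 1 }, peer = { 2 };
    bool migrated = false;           /* mirrors "ret.migrated = 0;" */
    struct task *next = load_balance(&local, &peer, &migrated);

    printf("run task %d, migrated=%d\n", next->id, migrated);
    return 0;
}

The design point is that only the steal path knows a migration
happened, so the flag is set at the point of stealing and initialized
to 0 by the caller, which avoids a stale value on runs where no
balancing occurs.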