author    Keir Fraser <keir@xen.org>    2010-11-26 10:10:40 +0000
committer Keir Fraser <keir@xen.org>    2010-11-26 10:10:40 +0000
commit    353c65ce23877d10b13b6ce93725509b1564cff7 (patch)
tree      99d50da91a89c18f525caf8c31ad591bbce50579 /xen/arch/x86/irq.c
parent    0bdb3ec667740cbf84fcf41d843f056b428ecdaa (diff)
Allow assign_irq_vector to return an old vector while moving an irq
The guest calls assign_irq_vector() to assign a vector to an irq if it doesn't have one yet, and to find out the vector if it does. If the cpu mask passed in intersects with the existing mask, the old vector is simply returned. However, if the irq happens to be in transit at the time, this returns -EBUSY. This is unnecessary: as soon as the irq migration succeeds, the logic would just return the old vector anyway.

This patch makes two changes:
* Switch the order of the checks, so that if the mask overlaps the existing one the old vector is always returned.
* Return -EAGAIN instead of -EBUSY for irqs that are being moved, to let the caller know that the failure is temporary and the call may succeed if repeated.

This fixes a bug where, on certain hardware using the credit2 scheduler, a pvops kernel with multiple vcpus doesn't boot.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
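
The switch to -EAGAIN matters because it tells the caller the failure is transient. The sketch below is a hypothetical illustration only, not code from this patch or from Xen's actual call sites: it assumes assign_irq_vector() takes a single irq argument and returns the assigned vector on success or a negative errno on failure, and the helper name assign_vector_retry is invented for the example.

/* Hypothetical helper, illustration only -- not part of this patch and not
 * one of Xen's real call sites.  Assumes assign_irq_vector() returns the
 * assigned vector on success or a negative errno on failure, and that
 * -EAGAIN means "an irq migration is still in flight, try again". */
static int assign_vector_retry(int irq)
{
    int vector;

    do {
        vector = assign_irq_vector(irq);
        if ( vector == -EAGAIN )
            cpu_relax();        /* give the in-flight move time to finish */
    } while ( vector == -EAGAIN );

    return vector;              /* vector on success, other -errno on error */
}

Whether spinning like this is appropriate depends on the calling context; the point is only that -EAGAIN, unlike -EBUSY, invites a retry.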
Diffstat (limited to 'xen/arch/x86/irq.c')
-rw-r--r--  xen/arch/x86/irq.c | 6
1 file changed, 3 insertions, 3 deletions
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 6ac0cd3438..fa63de2ab0 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -348,9 +348,6 @@ int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
     int cpu, err;
     cpumask_t tmp_mask;
 
-    if ((cfg->move_in_progress) || cfg->move_cleanup_count)
-        return -EBUSY;
-
     old_vector = irq_to_vector(irq);
     if (old_vector) {
         cpus_and(tmp_mask, mask, cpu_online_map);
@@ -361,6 +358,9 @@ int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
         }
     }
 
+    if ((cfg->move_in_progress) || cfg->move_cleanup_count)
+        return -EAGAIN;
+
     /* Only try and allocate irqs on cpus that are present */
     cpus_and(mask, mask, cpu_online_map);