author     Jan Beulich <jbeulich@novell.com>   2011-05-01 13:16:30 +0100
committer  Jan Beulich <jbeulich@novell.com>   2011-05-01 13:16:30 +0100
commit     f22f2fe48d144141fffd42a380383f45efbea8e3
tree       8e0d2c4787f66363688bc649a9c19a39a41379b2
parent     4c7909939b059cd366426acaf9988cc7c5f4c00b
x86: replace nr_irqs sized per-domain arrays with radix trees
It would seem possible to fold the two trees into one (e.g. by storing the emuirq bits in the upper half of the pointer), but I'm not certain that's worth it, as it would make deletion of entries more cumbersome. Unless pirq-s and emuirq-s were mutually exclusive...

Signed-off-by: Jan Beulich <jbeulich@novell.com>
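As an aside, the folding the message muses about amounts to packing both per-IRQ values into the single pointer-sized word a radix tree slot can hold. A hypothetical, 64-bit-only sketch of such packing follows (this is not what the patch does, which keeps the two trees separate; the helper names are made up):

#include <stdint.h>
#include <stdio.h>

/* Pack pirq into the low half and emuirq into the high half of one
 * pointer-sized word, as a single-tree variant might. */
static void *pack_pirq_emuirq(uint32_t pirq, uint32_t emuirq)
{
    return (void *)(((uintptr_t)emuirq << 32) | pirq);
}

static uint32_t unpack_pirq(void *slot)   { return (uintptr_t)slot & 0xffffffffu; }
static uint32_t unpack_emuirq(void *slot) { return (uintptr_t)slot >> 32; }

int main(void)
{
    void *slot = pack_pirq_emuirq(42, 7);
    printf("pirq %u, emuirq %u\n", unpack_pirq(slot), unpack_emuirq(slot));
    return 0;
}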
Diffstat (limited to 'xen/arch/x86/irq.c')
-rw-r--r--  xen/arch/x86/irq.c  104
1 file changed, 88 insertions(+), 16 deletions(-)
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 68aec1e9e6..f97d489e54 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -950,6 +950,58 @@ struct irq_desc *domain_spin_lock_irq_desc(
     return desc;
 }
 
+static int set_domain_irq_pirq(struct domain *d, int irq, int pirq)
+{
+    int err = radix_tree_insert(&d->arch.irq_pirq, irq, (void *)(long)pirq,
+                                NULL, NULL);
+
+    switch ( err )
+    {
+    case -EEXIST:
+        *radix_tree_lookup_slot(&d->arch.irq_pirq, irq) = (void *)(long)pirq;
+        /* fall through */
+    case 0:
+        d->arch.pirq_irq[pirq] = irq;
+        return 0;
+    }
+
+    return err;
+}
+
+static void clear_domain_irq_pirq(struct domain *d, int irq, int pirq)
+{
+    d->arch.pirq_irq[pirq] = 0;
+    radix_tree_delete(&d->arch.irq_pirq, irq, NULL);
+}
+
+int init_domain_irq_mapping(struct domain *d)
+{
+    unsigned int i;
+    int err;
+
+    INIT_RADIX_TREE(&d->arch.irq_pirq, 0);
+    if ( is_hvm_domain(d) )
+        INIT_RADIX_TREE(&d->arch.hvm_domain.emuirq_pirq, 0);
+
+    for ( i = 1, err = 0; !err && platform_legacy_irq(i); ++i )
+        if ( !IO_APIC_IRQ(i) )
+            err = set_domain_irq_pirq(d, i, i);
+
+    return err;
+}
+
+static void irq_slot_free(void *unused)
+{
+}
+
+void cleanup_domain_irq_mapping(struct domain *d)
+{
+    radix_tree_destroy(&d->arch.irq_pirq, irq_slot_free, NULL);
+    if ( is_hvm_domain(d) )
+        radix_tree_destroy(&d->arch.hvm_domain.emuirq_pirq,
+                           irq_slot_free, NULL);
+}
+
 /* Flush all ready EOIs from the top of this CPU's pending-EOI stack. */
 static void flush_ready_eoi(void)
 {
@@ -1386,7 +1438,7 @@ void pirq_guest_unbind(struct domain *d, int pirq)
         BUG_ON(irq <= 0);
         desc = irq_to_desc(irq);
         spin_lock_irq(&desc->lock);
-        d->arch.pirq_irq[pirq] = d->arch.irq_pirq[irq] = 0;
+        clear_domain_irq_pirq(d, irq, pirq);
     }
     else
     {
@@ -1544,15 +1596,23 @@ int map_domain_pirq(
             dprintk(XENLOG_G_ERR, "dom%d: irq %d in use\n",
                     d->domain_id, irq);
         desc->handler = &pci_msi_type;
-        d->arch.pirq_irq[pirq] = irq;
-        d->arch.irq_pirq[irq] = pirq;
-        setup_msi_irq(pdev, msi_desc, irq);
-        spin_unlock_irqrestore(&desc->lock, flags);
-    } else
+        ret = set_domain_irq_pirq(d, irq, pirq);
+        if ( !ret )
+        {
+            setup_msi_irq(pdev, msi_desc, irq);
+            spin_unlock_irqrestore(&desc->lock, flags);
+        }
+        else
+        {
+            desc->handler = &no_irq_type;
+            spin_unlock_irqrestore(&desc->lock, flags);
+            pci_disable_msi(msi_desc);
+        }
+    }
+    else
     {
         spin_lock_irqsave(&desc->lock, flags);
-        d->arch.pirq_irq[pirq] = irq;
-        d->arch.irq_pirq[irq] = pirq;
+        ret = set_domain_irq_pirq(d, irq, pirq);
         spin_unlock_irqrestore(&desc->lock, flags);
     }
 
@@ -1599,14 +1659,11 @@ int unmap_domain_pirq(struct domain *d, int pirq)
     BUG_ON(irq != domain_pirq_to_irq(d, pirq));
 
     if ( !forced_unbind )
-    {
-        d->arch.pirq_irq[pirq] = 0;
-        d->arch.irq_pirq[irq] = 0;
-    }
+        clear_domain_irq_pirq(d, irq, pirq);
     else
     {
         d->arch.pirq_irq[pirq] = -irq;
-        d->arch.irq_pirq[irq] = -pirq;
+        *radix_tree_lookup_slot(&d->arch.irq_pirq, irq) = (void *)(long)-pirq;
     }
 
     spin_unlock_irqrestore(&desc->lock, flags);
@@ -1829,10 +1886,25 @@ int map_domain_emuirq_pirq(struct domain *d, int pirq, int emuirq)
         return 0;
     }
 
-    d->arch.pirq_emuirq[pirq] = emuirq;
     /* do not store emuirq mappings for pt devices */
     if ( emuirq != IRQ_PT )
-        d->arch.emuirq_pirq[emuirq] = pirq;
+    {
+        int err = radix_tree_insert(&d->arch.hvm_domain.emuirq_pirq, emuirq,
+                                    (void *)((long)pirq + 1), NULL, NULL);
+
+        switch ( err )
+        {
+        case 0:
+            break;
+        case -EEXIST:
+            *radix_tree_lookup_slot(&d->arch.hvm_domain.emuirq_pirq, emuirq) =
+                (void *)((long)pirq + 1);
+            break;
+        default:
+            return err;
+        }
+    }
+    d->arch.pirq_emuirq[pirq] = emuirq;
 
     return 0;
 }
@@ -1860,7 +1932,7 @@ int unmap_domain_pirq_emuirq(struct domain *d, int pirq)
     d->arch.pirq_emuirq[pirq] = IRQ_UNBOUND;
 
     if ( emuirq != IRQ_PT )
-        d->arch.emuirq_pirq[emuirq] = IRQ_UNBOUND;
+        radix_tree_delete(&d->arch.hvm_domain.emuirq_pirq, emuirq, NULL);
 
 done:
     return ret;
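
For reference, a minimal standalone sketch (not Xen code) of the two slot encodings visible in the patch: the irq -> pirq tree stores the pirq as a plain cast (negated for a forcibly unbound entry, see unmap_domain_pirq above), while the emuirq -> pirq tree stores pirq + 1 so that pirq 0 stays distinguishable from an empty slot. IRQ_UNBOUND is defined locally just so the example compiles, and the helper names are invented:

#include <stdio.h>

#define IRQ_UNBOUND (-1)  /* local stand-in; Xen defines a constant of this name */

/* irq -> pirq tree: plain integer-in-pointer encoding. */
static void *encode_irq_pirq(int pirq) { return (void *)(long)pirq; }
static int decode_irq_pirq(void *slot) { return (int)(long)slot; }

/* emuirq -> pirq tree: "+ 1" bias so an empty (NULL) slot is not confused
 * with a stored pirq of 0; NULL then decodes to IRQ_UNBOUND. */
static void *encode_emuirq_pirq(int pirq) { return (void *)((long)pirq + 1); }
static int decode_emuirq_pirq(void *slot) { return (int)((long)slot - 1); }

int main(void)
{
    printf("%d\n", decode_irq_pirq(encode_irq_pirq(5)));       /* 5 */
    printf("%d\n", decode_emuirq_pirq(encode_emuirq_pirq(0))); /* 0 */
    printf("%d\n", decode_emuirq_pirq(NULL));                  /* -1 == IRQ_UNBOUND */
    return 0;
}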