-rw-r--r--  xen/arch/x86/domain.c                           |  20
-rw-r--r--  xen/arch/x86/hpet.c                             |  90
-rw-r--r--  xen/arch/x86/hvm/vmsi.c                         |   4
-rw-r--r--  xen/arch/x86/i8259.c                            |  65
-rw-r--r--  xen/arch/x86/io_apic.c                          | 194
-rw-r--r--  xen/arch/x86/irq.c                              | 533
-rw-r--r--  xen/arch/x86/msi.c                              | 118
-rw-r--r--  xen/arch/x86/physdev.c                          |  38
-rw-r--r--  xen/arch/x86/setup.c                            |   2
-rw-r--r--  xen/drivers/passthrough/amd/iommu_init.c        |  56
-rw-r--r--  xen/drivers/passthrough/io.c                    |  49
-rw-r--r--  xen/drivers/passthrough/pci.c                   |   4
-rw-r--r--  xen/drivers/passthrough/vtd/iommu.c             |  79
-rw-r--r--  xen/drivers/passthrough/vtd/x86/vtd.c           |   2
-rw-r--r--  xen/include/asm-x86/amd-iommu.h                 |   2
-rw-r--r--  xen/include/asm-x86/domain.h                    |   6
-rw-r--r--  xen/include/asm-x86/irq.h                       |  30
-rw-r--r--  xen/include/asm-x86/mach-default/irq_vectors.h  |   1
-rw-r--r--  xen/include/asm-x86/msi.h                       |  14
-rw-r--r--  xen/include/xen/hvm/irq.h                       |   2
-rw-r--r--  xen/include/xen/iommu.h                         |   2
-rw-r--r--  xen/include/xen/irq.h                           |  31
22 files changed, 742 insertions(+), 600 deletions(-)
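
The common theme across these files is a switch from vector-indexed to
irq-indexed bookkeeping: irq_desc[] and its companions become dynamically
sized arrays keyed by a global irq number, with explicit translation to and
from hardware vectors. A minimal sketch of the resulting lookup model,
pieced together from the hunks below (the inline body of irq_to_desc() is
an assumption about the headers, whose hunks are not shown here):

    /* Post-patch indexing model (sketch, not part of the diff). */
    struct irq_desc *irq_desc;     /* keyed by irq; allocated in init_irq_data() */
    int vector_irq[NR_VECTORS];    /* hardware vector -> irq, FREE_TO_ASSIGN_IRQ if none */
    u8 *irq_vector;                /* irq -> hardware vector, 0 if unassigned */

    static inline struct irq_desc *irq_to_desc(unsigned int irq)
    {
        return &irq_desc[irq];     /* pre-patch code indexed irq_desc[] by vector */
    }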
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 064f5dc214..64e029065a 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -474,11 +474,17 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
share_xen_page_with_guest(
virt_to_page(d->shared_info), d, XENSHARE_writable);
- d->arch.pirq_vector = xmalloc_array(s16, d->nr_pirqs);
- if ( !d->arch.pirq_vector )
+ d->arch.pirq_irq = xmalloc_array(int, d->nr_pirqs);
+ if ( !d->arch.pirq_irq )
goto fail;
- memset(d->arch.pirq_vector, 0,
- d->nr_pirqs * sizeof(*d->arch.pirq_vector));
+ memset(d->arch.pirq_irq, 0,
+ d->nr_pirqs * sizeof(*d->arch.pirq_irq));
+
+ d->arch.irq_pirq = xmalloc_array(int, nr_irqs);
+ if ( !d->arch.irq_pirq )
+ goto fail;
+ memset(d->arch.irq_pirq, 0,
+ nr_irqs * sizeof(*d->arch.irq_pirq));
if ( (rc = iommu_domain_init(d)) != 0 )
goto fail;
@@ -513,7 +519,8 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
fail:
d->is_dying = DOMDYING_dead;
- xfree(d->arch.pirq_vector);
+ xfree(d->arch.pirq_irq);
+ xfree(d->arch.irq_pirq);
free_xenheap_page(d->shared_info);
if ( paging_initialised )
paging_final_teardown(d);
@@ -562,7 +569,8 @@ void arch_domain_destroy(struct domain *d)
#endif
free_xenheap_page(d->shared_info);
- xfree(d->arch.pirq_vector);
+ xfree(d->arch.pirq_irq);
+ xfree(d->arch.irq_pirq);
}
unsigned long pv_guest_cr4_fixup(unsigned long guest_cr4)
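
The pirq_irq/irq_pirq pair allocated above replaces the per-domain
pirq <-> vector maps with pirq <-> irq ones. The accessors used throughout
the rest of the patch presumably look like this (a sketch; the real
definitions live in xen/include/asm-x86/domain.h, whose hunk is not shown
in this section):

    /* Assumed per-domain translation helpers. Negative entries mark a
     * forced unbind (see unmap_domain_pirq() further down). */
    #define domain_pirq_to_irq(d, pirq)  ((d)->arch.pirq_irq[pirq])
    #define domain_irq_to_pirq(d, irq)   ((d)->arch.irq_pirq[irq])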
diff --git a/xen/arch/x86/hpet.c b/xen/arch/x86/hpet.c
index 5ea752348b..0f3f48b3a3 100644
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -38,7 +38,7 @@ struct hpet_event_channel
unsigned int idx; /* physical channel idx */
int cpu; /* msi target */
- unsigned int vector;/* msi vector */
+    unsigned int irq;   /* msi irq */
unsigned int flags; /* HPET_EVT_x */
} __cacheline_aligned;
static struct hpet_event_channel legacy_hpet_event;
@@ -47,13 +47,13 @@ static unsigned int num_hpets_used; /* msi hpet channels used for broadcast */
DEFINE_PER_CPU(struct hpet_event_channel *, cpu_bc_channel);
-static int vector_channel[NR_VECTORS] = {[0 ... NR_VECTORS-1] = -1};
+static int *irq_channel;
-#define vector_to_channel(vector) vector_channel[vector]
+#define irq_to_channel(irq) irq_channel[irq]
unsigned long hpet_address;
-void msi_compose_msg(struct pci_dev *pdev, int vector, struct msi_msg *msg);
+void msi_compose_msg(struct pci_dev *pdev, int irq, struct msi_msg *msg);
/*
* force_hpet_broadcast: by default legacy hpet broadcast will be stopped
@@ -208,7 +208,7 @@ again:
spin_unlock_irq(&ch->lock);
}
-static void hpet_interrupt_handler(int vector, void *data,
+static void hpet_interrupt_handler(int irq, void *data,
struct cpu_user_regs *regs)
{
struct hpet_event_channel *ch = (struct hpet_event_channel *)data;
@@ -221,10 +221,10 @@ static void hpet_interrupt_handler(int vector, void *data,
ch->event_handler(ch);
}
-static void hpet_msi_unmask(unsigned int vector)
+static void hpet_msi_unmask(unsigned int irq)
{
unsigned long cfg;
- int ch_idx = vector_to_channel(vector);
+ int ch_idx = irq_to_channel(irq);
struct hpet_event_channel *ch;
BUG_ON(ch_idx < 0);
@@ -235,10 +235,10 @@ static void hpet_msi_unmask(unsigned int vector)
hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
}
-static void hpet_msi_mask(unsigned int vector)
+static void hpet_msi_mask(unsigned int irq)
{
unsigned long cfg;
- int ch_idx = vector_to_channel(vector);
+ int ch_idx = irq_to_channel(irq);
struct hpet_event_channel *ch;
BUG_ON(ch_idx < 0);
@@ -249,9 +249,9 @@ static void hpet_msi_mask(unsigned int vector)
hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
}
-static void hpet_msi_write(unsigned int vector, struct msi_msg *msg)
+static void hpet_msi_write(unsigned int irq, struct msi_msg *msg)
{
- int ch_idx = vector_to_channel(vector);
+ int ch_idx = irq_to_channel(irq);
struct hpet_event_channel *ch;
BUG_ON(ch_idx < 0);
@@ -261,9 +261,9 @@ static void hpet_msi_write(unsigned int vector, struct msi_msg *msg)
hpet_write32(msg->address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
}
-static void hpet_msi_read(unsigned int vector, struct msi_msg *msg)
+static void hpet_msi_read(unsigned int irq, struct msi_msg *msg)
{
- int ch_idx = vector_to_channel(vector);
+ int ch_idx = irq_to_channel(irq);
struct hpet_event_channel *ch;
BUG_ON(ch_idx < 0);
@@ -274,31 +274,32 @@ static void hpet_msi_read(unsigned int vector, struct msi_msg *msg)
msg->address_hi = 0;
}
-static unsigned int hpet_msi_startup(unsigned int vector)
+static unsigned int hpet_msi_startup(unsigned int irq)
{
- hpet_msi_unmask(vector);
+ hpet_msi_unmask(irq);
return 0;
}
-static void hpet_msi_shutdown(unsigned int vector)
+static void hpet_msi_shutdown(unsigned int irq)
{
- hpet_msi_mask(vector);
+ hpet_msi_mask(irq);
}
-static void hpet_msi_ack(unsigned int vector)
+static void hpet_msi_ack(unsigned int irq)
{
ack_APIC_irq();
}
-static void hpet_msi_end(unsigned int vector)
+static void hpet_msi_end(unsigned int irq)
{
}
-static void hpet_msi_set_affinity(unsigned int vector, cpumask_t mask)
+static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
struct msi_msg msg;
unsigned int dest;
cpumask_t tmp;
+ int vector = irq_to_vector(irq);
cpus_and(tmp, mask, cpu_online_map);
if ( cpus_empty(tmp) )
@@ -314,7 +315,7 @@ static void hpet_msi_set_affinity(unsigned int vector, cpumask_t mask)
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-    hpet_msi_write(vector, &msg);
+    hpet_msi_write(irq, &msg);
- irq_desc[vector].affinity = mask;
+ irq_desc[irq].affinity = mask;
}
/*
@@ -331,44 +332,44 @@ static struct hw_interrupt_type hpet_msi_type = {
.set_affinity = hpet_msi_set_affinity,
};
-static int hpet_setup_msi_irq(unsigned int vector)
+static int hpet_setup_msi_irq(unsigned int irq)
{
int ret;
struct msi_msg msg;
- struct hpet_event_channel *ch = &hpet_events[vector_to_channel(vector)];
+ struct hpet_event_channel *ch = &hpet_events[irq_to_channel(irq)];
- irq_desc[vector].handler = &hpet_msi_type;
- ret = request_irq_vector(vector, hpet_interrupt_handler,
+ irq_desc[irq].handler = &hpet_msi_type;
+ ret = request_irq(irq, hpet_interrupt_handler,
0, "HPET", ch);
if ( ret < 0 )
return ret;
- msi_compose_msg(NULL, vector, &msg);
- hpet_msi_write(vector, &msg);
+ msi_compose_msg(NULL, irq, &msg);
+ hpet_msi_write(irq, &msg);
return 0;
}
static int hpet_assign_irq(struct hpet_event_channel *ch)
{
- int vector;
+ int irq;
- if ( ch->vector )
+ if ( ch->irq )
return 0;
- if ( (vector = assign_irq_vector(AUTO_ASSIGN_IRQ)) < 0 )
- return vector;
+ if ( (irq = create_irq()) < 0 )
+ return irq;
- vector_channel[vector] = ch - &hpet_events[0];
+ irq_channel[irq] = ch - &hpet_events[0];
- if ( hpet_setup_msi_irq(vector) )
+ if ( hpet_setup_msi_irq(irq) )
{
- free_irq_vector(vector);
- vector_channel[vector] = -1;
+ destroy_irq(irq);
+ irq_channel[irq] = -1;
return -EINVAL;
}
- ch->vector = vector;
+ ch->irq = irq;
return 0;
}
@@ -402,8 +403,8 @@ static int hpet_fsb_cap_lookup(void)
/* set default irq affinity */
ch->cpu = num_chs_used;
per_cpu(cpu_bc_channel, ch->cpu) = ch;
- irq_desc[ch->vector].handler->
- set_affinity(ch->vector, cpumask_of_cpu(ch->cpu));
+ irq_desc[ch->irq].handler->
+ set_affinity(ch->irq, cpumask_of_cpu(ch->cpu));
num_chs_used++;
@@ -462,8 +463,8 @@ static void hpet_attach_channel_share(int cpu, struct hpet_event_channel *ch)
return;
/* set irq affinity */
- irq_desc[ch->vector].handler->
- set_affinity(ch->vector, cpumask_of_cpu(ch->cpu));
+ irq_desc[ch->irq].handler->
+ set_affinity(ch->irq, cpumask_of_cpu(ch->cpu));
}
static void hpet_detach_channel_share(int cpu)
@@ -484,8 +485,8 @@ static void hpet_detach_channel_share(int cpu)
ch->cpu = first_cpu(ch->cpumask);
/* set irq affinity */
- irq_desc[ch->vector].handler->
- set_affinity(ch->vector, cpumask_of_cpu(ch->cpu));
+ irq_desc[ch->irq].handler->
+ set_affinity(ch->irq, cpumask_of_cpu(ch->cpu));
}
static void (*hpet_attach_channel)(int cpu, struct hpet_event_channel *ch);
@@ -523,6 +524,11 @@ void hpet_broadcast_init(void)
u32 hpet_id, cfg;
int i;
+    irq_channel = xmalloc_array(int, nr_irqs);
+    BUG_ON(!irq_channel);
+    for ( i = 0; i < nr_irqs; i++ )
+        irq_channel[i] = -1;
+
hpet_rate = hpet_setup();
if ( hpet_rate == 0 )
return;
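
Because nr_irqs is now a boot-time quantity, the channel lookup can no
longer be a static NR_VECTORS-sized array; hpet_broadcast_init() allocates
it on demand and BUG()s on failure. A more forgiving variant, as a sketch
only (falling back rather than crashing is a suggestion, not what the
patch does):

    /* Sketch: allocate the irq -> channel map at run time. */
    irq_channel = xmalloc_array(int, nr_irqs);
    if ( !irq_channel )
        return;                   /* no MSI HPET broadcast; legacy path still works */
    for ( i = 0; i < nr_irqs; i++ )
        irq_channel[i] = -1;      /* -1 == no channel bound to this irq */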
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index e6bc057dcd..3af0a0aaa0 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -374,7 +374,7 @@ static void del_msixtbl_entry(struct msixtbl_entry *entry)
int msixtbl_pt_register(struct domain *d, int pirq, uint64_t gtable)
{
- irq_desc_t *irq_desc;
+ struct irq_desc *irq_desc;
struct msi_desc *msi_desc;
struct pci_dev *pdev;
struct msixtbl_entry *entry, *new_entry;
@@ -429,7 +429,7 @@ out:
void msixtbl_pt_unregister(struct domain *d, int pirq)
{
- irq_desc_t *irq_desc;
+ struct irq_desc *irq_desc;
struct msi_desc *msi_desc;
struct pci_dev *pdev;
struct msixtbl_entry *entry;
diff --git a/xen/arch/x86/i8259.c b/xen/arch/x86/i8259.c
index 8df25dc8a0..0ae627f762 100644
--- a/xen/arch/x86/i8259.c
+++ b/xen/arch/x86/i8259.c
@@ -106,38 +106,28 @@ BUILD_SMP_INTERRUPT(cmci_interrupt, CMCI_APIC_VECTOR)
static DEFINE_SPINLOCK(i8259A_lock);
-static void disable_8259A_vector(unsigned int vector)
-{
- disable_8259A_irq(LEGACY_IRQ_FROM_VECTOR(vector));
-}
+static void mask_and_ack_8259A_irq(unsigned int irq);
-static void enable_8259A_vector(unsigned int vector)
+static unsigned int startup_8259A_irq(unsigned int irq)
{
- enable_8259A_irq(LEGACY_IRQ_FROM_VECTOR(vector));
+ enable_8259A_irq(irq);
+ return 0; /* never anything pending */
}
-static void mask_and_ack_8259A_vector(unsigned int);
-
-static void end_8259A_vector(unsigned int vector)
+static void end_8259A_irq(unsigned int irq)
{
- if (!(irq_desc[vector].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- enable_8259A_vector(vector);
-}
-
-static unsigned int startup_8259A_vector(unsigned int vector)
-{
- enable_8259A_vector(vector);
- return 0; /* never anything pending */
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ enable_8259A_irq(irq);
}
static struct hw_interrupt_type i8259A_irq_type = {
.typename = "XT-PIC",
- .startup = startup_8259A_vector,
- .shutdown = disable_8259A_vector,
- .enable = enable_8259A_vector,
- .disable = disable_8259A_vector,
- .ack = mask_and_ack_8259A_vector,
- .end = end_8259A_vector
+ .startup = startup_8259A_irq,
+ .shutdown = disable_8259A_irq,
+ .enable = enable_8259A_irq,
+ .disable = disable_8259A_irq,
+ .ack = mask_and_ack_8259A_irq,
+ .end = end_8259A_irq
};
/*
@@ -237,9 +227,8 @@ static inline int i8259A_irq_real(unsigned int irq)
* first, _then_ send the EOI, and the order of EOI
* to the two 8259s is important!
*/
-static void mask_and_ack_8259A_vector(unsigned int vector)
+static void mask_and_ack_8259A_irq(unsigned int irq)
{
- unsigned int irq = LEGACY_IRQ_FROM_VECTOR(vector);
unsigned int irqmask = 1 << irq;
unsigned long flags;
@@ -369,9 +358,9 @@ void __devinit init_8259A(int auto_eoi)
* in AEOI mode we just have to mask the interrupt
* when acking.
*/
- i8259A_irq_type.ack = disable_8259A_vector;
+ i8259A_irq_type.ack = disable_8259A_irq;
else
- i8259A_irq_type.ack = mask_and_ack_8259A_vector;
+ i8259A_irq_type.ack = mask_and_ack_8259A_irq;
udelay(100); /* wait for 8259A to initialize */
@@ -385,31 +374,25 @@ static struct irqaction cascade = { no_action, "cascade", NULL};
void __init init_IRQ(void)
{
- int i;
+ int i, vector;
init_bsp_APIC();
init_8259A(0);
- for ( i = 0; i < NR_VECTORS; i++ )
+ BUG_ON(init_irq_data() < 0);
+
+ for ( vector = FIRST_DYNAMIC_VECTOR; vector < NR_VECTORS; vector++ )
{
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].handler = &no_irq_type;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- spin_lock_init(&irq_desc[i].lock);
- cpus_setall(irq_desc[i].affinity);
- if ( i >= 0x20 )
- set_intr_gate(i, interrupt[i]);
+ if (vector == HYPERCALL_VECTOR || vector == LEGACY_SYSCALL_VECTOR)
+ continue;
+ set_intr_gate(vector, interrupt[vector]);
}
- irq_vector = xmalloc_array(u8, nr_irqs_gsi);
- memset(irq_vector, 0, nr_irqs_gsi * sizeof(*irq_vector));
-
for ( i = 0; i < 16; i++ )
{
vector_irq[LEGACY_VECTOR(i)] = i;
- irq_desc[LEGACY_VECTOR(i)].handler = &i8259A_irq_type;
+ irq_desc[i].handler = &i8259A_irq_type;
}
/* Never allocate the hypercall vector or Linux/BSD fast-trap vector. */
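
With descriptors keyed by irq, the PIC callbacks no longer translate their
argument through LEGACY_IRQ_FROM_VECTOR(); the vector -> irq step happens
once at dispatch instead. A sketch of that step, taken from the do_IRQ()
hunk in xen/arch/x86/irq.c further down:

    /* Single translation at dispatch; callbacks then receive the irq. */
    unsigned int vector = regs->entry_vector;
    int irq = vector_irq[vector];            /* negative (unassigned) is checked */
    struct irq_desc *desc = irq_to_desc(irq);
    desc->handler->ack(irq);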
diff --git a/xen/arch/x86/io_apic.c b/xen/arch/x86/io_apic.c
index b70fdf9d3c..ec71e2f104 100644
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -661,9 +661,6 @@ static inline int IO_APIC_irq_trigger(int irq)
return 0;
}
-/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
-u8 *irq_vector __read_mostly = (u8 *)(1UL << (BITS_PER_LONG - 1));
-
static struct hw_interrupt_type ioapic_level_type;
static struct hw_interrupt_type ioapic_edge_type;
@@ -671,13 +668,13 @@ static struct hw_interrupt_type ioapic_edge_type;
#define IOAPIC_EDGE 0
#define IOAPIC_LEVEL 1
-static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
+static inline void ioapic_register_intr(int irq, unsigned long trigger)
{
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
trigger == IOAPIC_LEVEL)
- irq_desc[vector].handler = &ioapic_level_type;
+ irq_desc[irq].handler = &ioapic_level_type;
else
- irq_desc[vector].handler = &ioapic_edge_type;
+ irq_desc[irq].handler = &ioapic_edge_type;
}
static void __init setup_IO_APIC_irqs(void)
@@ -740,7 +737,7 @@ static void __init setup_IO_APIC_irqs(void)
if (IO_APIC_IRQ(irq)) {
vector = assign_irq_vector(irq);
entry.vector = vector;
- ioapic_register_intr(irq, vector, IOAPIC_AUTO);
+ ioapic_register_intr(irq, IOAPIC_AUTO);
if (!apic && (irq < 16))
disable_8259A_irq(irq);
@@ -748,7 +745,7 @@ static void __init setup_IO_APIC_irqs(void)
spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
- set_native_irq_info(entry.vector, TARGET_CPUS);
+ set_native_irq_info(irq, TARGET_CPUS);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
}
@@ -788,7 +785,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in
* The timer IRQ doesn't have to know that behind the
* scene we have a 8259A-master in AEOI mode ...
*/
- irq_desc[IO_APIC_VECTOR(0)].handler = &ioapic_edge_type;
+ irq_desc[0].handler = &ioapic_edge_type;
/*
* Add it to the IO-APIC irq-routing table:
@@ -1269,7 +1266,7 @@ static unsigned int startup_edge_ioapic_irq(unsigned int irq)
*/
static void ack_edge_ioapic_irq(unsigned int irq)
{
- if ((irq_desc[IO_APIC_VECTOR(irq)].status & (IRQ_PENDING | IRQ_DISABLED))
+ if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
== (IRQ_PENDING | IRQ_DISABLED))
mask_IO_APIC_irq(irq);
ack_APIC_irq();
@@ -1359,7 +1356,7 @@ static void end_level_ioapic_irq (unsigned int irq)
if ( !ioapic_ack_new )
{
- if ( !(irq_desc[IO_APIC_VECTOR(irq)].status & IRQ_DISABLED) )
+ if ( !(irq_desc[irq].status & IRQ_DISABLED) )
unmask_IO_APIC_irq(irq);
return;
}
@@ -1395,70 +1392,19 @@ static void end_level_ioapic_irq (unsigned int irq)
__mask_IO_APIC_irq(irq);
__edge_IO_APIC_irq(irq);
__level_IO_APIC_irq(irq);
- if ( !(irq_desc[IO_APIC_VECTOR(irq)].status & IRQ_DISABLED) )
+ if ( !(irq_desc[irq].status & IRQ_DISABLED) )
__unmask_IO_APIC_irq(irq);
spin_unlock(&ioapic_lock);
}
}
-static unsigned int startup_edge_ioapic_vector(unsigned int vector)
-{
- int irq = vector_to_irq(vector);
- return startup_edge_ioapic_irq(irq);
-}
-
-static void ack_edge_ioapic_vector(unsigned int vector)
-{
- int irq = vector_to_irq(vector);
- ack_edge_ioapic_irq(irq);
-}
-
-static unsigned int startup_level_ioapic_vector(unsigned int vector)
-{
- int irq = vector_to_irq(vector);
- return startup_level_ioapic_irq (irq);
-}
-
-static void mask_and_ack_level_ioapic_vector(unsigned int vector)
-{
- int irq = vector_to_irq(vector);
- mask_and_ack_level_ioapic_irq(irq);
-}
-
-static void end_level_ioapic_vector(unsigned int vector)
-{
- int irq = vector_to_irq(vector);
- end_level_ioapic_irq(irq);
-}
-
-static void mask_IO_APIC_vector(unsigned int vector)
-{
- int irq = vector_to_irq(vector);
- mask_IO_APIC_irq(irq);
-}
-
-static void unmask_IO_APIC_vector(unsigned int vector)
+static void disable_edge_ioapic_irq(unsigned int irq)
{
- int irq = vector_to_irq(vector);
- unmask_IO_APIC_irq(irq);
}
-static void set_ioapic_affinity_vector(
- unsigned int vector, cpumask_t cpu_mask)
-{
- int irq = vector_to_irq(vector);
-
- set_native_irq_info(vector, cpu_mask);
- set_ioapic_affinity_irq(irq, cpu_mask);
-}
-
-static void disable_edge_ioapic_vector(unsigned int vector)
-{
-}
-
-static void end_edge_ioapic_vector(unsigned int vector)
-{
-}
+static void end_edge_ioapic_irq(unsigned int irq)
+{
+}
/*
* Level and edge triggered IO-APIC interrupts need different handling,
@@ -1470,53 +1416,54 @@ static void end_edge_ioapic_vector(unsigned int vector)
*/
static struct hw_interrupt_type ioapic_edge_type = {
.typename = "IO-APIC-edge",
- .startup = startup_edge_ioapic_vector,
- .shutdown = disable_edge_ioapic_vector,
- .enable = unmask_IO_APIC_vector,
- .disable = disable_edge_ioapic_vector,
- .ack = ack_edge_ioapic_vector,
- .end = end_edge_ioapic_vector,
- .set_affinity = set_ioapic_affinity_vector,
+ .startup = startup_edge_ioapic_irq,
+ .shutdown = disable_edge_ioapic_irq,
+ .enable = unmask_IO_APIC_irq,
+ .disable = disable_edge_ioapic_irq,
+ .ack = ack_edge_ioapic_irq,
+ .end = end_edge_ioapic_irq,
+ .set_affinity = set_ioapic_affinity_irq,
};
static struct hw_interrupt_type ioapic_level_type = {
.typename = "IO-APIC-level",
- .startup = startup_level_ioapic_vector,
- .shutdown = mask_IO_APIC_vector,
- .enable = unmask_IO_APIC_vector,
- .disable = mask_IO_APIC_vector,
- .ack = mask_and_ack_level_ioapic_vector,
- .end = end_level_ioapic_vector,
- .set_affinity = set_ioapic_affinity_vector,
+ .startup = startup_level_ioapic_irq,
+ .shutdown = mask_IO_APIC_irq,
+ .enable = unmask_IO_APIC_irq,
+ .disable = mask_IO_APIC_irq,
+ .ack = mask_and_ack_level_ioapic_irq,
+ .end = end_level_ioapic_irq,
+ .set_affinity = set_ioapic_affinity_irq,
};
-static unsigned int startup_msi_vector(unsigned int vector)
+static unsigned int startup_msi_irq(unsigned int irq)
{
- unmask_msi_vector(vector);
+ unmask_msi_irq(irq);
return 0;
}
-static void ack_msi_vector(unsigned int vector)
+static void ack_msi_irq(unsigned int irq)
{
- if ( msi_maskable_irq(irq_desc[vector].msi_desc) )
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if ( msi_maskable_irq(desc->msi_desc) )
ack_APIC_irq(); /* ACKTYPE_NONE */
}
-static void end_msi_vector(unsigned int vector)
+static void end_msi_irq(unsigned int irq)
{
- if ( !msi_maskable_irq(irq_desc[vector].msi_desc) )
+ if ( !msi_maskable_irq(irq_desc[irq].msi_desc) )
ack_APIC_irq(); /* ACKTYPE_EOI */
}
-static void shutdown_msi_vector(unsigned int vector)
+static void shutdown_msi_irq(unsigned int irq)
{
- mask_msi_vector(vector);
+ mask_msi_irq(irq);
}
-static void set_msi_affinity_vector(unsigned int vector, cpumask_t cpu_mask)
+static void set_msi_affinity_irq(unsigned int irq, cpumask_t cpu_mask)
{
- set_native_irq_info(vector, cpu_mask);
- set_msi_affinity(vector, cpu_mask);
+ set_msi_affinity(irq, cpu_mask);
}
/*
@@ -1525,13 +1472,13 @@ static void set_msi_affinity_vector(unsigned int vector, cpumask_t cpu_mask)
*/
struct hw_interrupt_type pci_msi_type = {
.typename = "PCI-MSI",
- .startup = startup_msi_vector,
- .shutdown = shutdown_msi_vector,
- .enable = unmask_msi_vector,
- .disable = mask_msi_vector,
- .ack = ack_msi_vector,
- .end = end_msi_vector,
- .set_affinity = set_msi_affinity_vector,
+ .startup = startup_msi_irq,
+ .shutdown = shutdown_msi_irq,
+ .enable = unmask_msi_irq,
+ .disable = mask_msi_irq,
+ .ack = ack_msi_irq,
+ .end = end_msi_irq,
+ .set_affinity = set_msi_affinity_irq,
};
static inline void init_IO_APIC_traps(void)
@@ -1543,7 +1490,7 @@ static inline void init_IO_APIC_traps(void)
make_8259A_irq(irq);
}
-static void enable_lapic_vector(unsigned int vector)
+static void enable_lapic_irq(unsigned int irq)
{
unsigned long v;
@@ -1551,7 +1498,7 @@ static void enable_lapic_vector(unsigned int vector)
apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
}
-static void disable_lapic_vector(unsigned int vector)
+static void disable_lapic_irq(unsigned int irq)
{
unsigned long v;
@@ -1559,21 +1506,21 @@ static void disable_lapic_vector(unsigned int vector)
apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
}
-static void ack_lapic_vector(unsigned int vector)
+static void ack_lapic_irq(unsigned int irq)
{
ack_APIC_irq();
}
-static void end_lapic_vector(unsigned int vector) { /* nothing */ }
+static void end_lapic_irq(unsigned int irq) { /* nothing */ }
static struct hw_interrupt_type lapic_irq_type = {
.typename = "local-APIC-edge",
.startup = NULL, /* startup_irq() not used for IRQ0 */
.shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
- .enable = enable_lapic_vector,
- .disable = disable_lapic_vector,
- .ack = ack_lapic_vector,
- .end = end_lapic_vector
+ .enable = enable_lapic_irq,
+ .disable = disable_lapic_irq,
+ .ack = ack_lapic_irq,
+ .end = end_lapic_irq,
};
/*
@@ -1661,9 +1608,9 @@ static inline void check_timer(void)
disable_8259A_irq(0);
vector = assign_irq_vector(0);
- irq_desc[IO_APIC_VECTOR(0)].action = irq_desc[LEGACY_VECTOR(0)].action;
- irq_desc[IO_APIC_VECTOR(0)].depth = 0;
- irq_desc[IO_APIC_VECTOR(0)].status &= ~IRQ_DISABLED;
+ irq_desc[0].depth = 0;
+ irq_desc[0].status &= ~IRQ_DISABLED;
+ irq_desc[0].handler = &ioapic_edge_type;
/*
* Subtle, code in do_timer_interrupt() expects an AEOI
@@ -1736,7 +1683,7 @@ static inline void check_timer(void)
printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
disable_8259A_irq(0);
- irq_desc[vector].handler = &lapic_irq_type;
+ irq_desc[0].handler = &lapic_irq_type;
apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
enable_8259A_irq(0);
@@ -2002,7 +1949,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
edge_level, active_high_low);
- ioapic_register_intr(irq, entry.vector, edge_level);
+ ioapic_register_intr(irq, edge_level);
if (!ioapic && (irq < 16))
disable_8259A_irq(irq);
@@ -2010,7 +1957,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
- set_native_irq_info(entry.vector, TARGET_CPUS);
+ set_native_irq_info(irq, TARGET_CPUS);
spin_unlock_irqrestore(&ioapic_lock, flags);
return 0;
@@ -2114,12 +2061,13 @@ int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
if ( old_rte.vector >= FIRST_DYNAMIC_VECTOR )
old_irq = vector_irq[old_rte.vector];
+
if ( new_rte.vector >= FIRST_DYNAMIC_VECTOR )
new_irq = vector_irq[new_rte.vector];
if ( (old_irq != new_irq) && (old_irq >= 0) && IO_APIC_IRQ(old_irq) )
{
- if ( irq_desc[IO_APIC_VECTOR(old_irq)].action )
+ if ( irq_desc[old_irq].action )
{
WARN_BOGUS_WRITE("Attempt to remove IO-APIC pin of in-use IRQ!\n");
spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -2131,7 +2079,7 @@ int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
if ( (new_irq >= 0) && IO_APIC_IRQ(new_irq) )
{
- if ( irq_desc[IO_APIC_VECTOR(new_irq)].action )
+ if ( irq_desc[new_irq].action )
{
WARN_BOGUS_WRITE("Attempt to %s IO-APIC pin for in-use IRQ!\n",
(old_irq != new_irq) ? "add" : "modify");
@@ -2140,7 +2088,7 @@ int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
}
/* Set the correct irq-handling type. */
- irq_desc[IO_APIC_VECTOR(new_irq)].handler = new_rte.trigger ?
+ irq_desc[new_irq].handler = new_rte.trigger ?
&ioapic_level_type: &ioapic_edge_type;
if ( old_irq != new_irq )
@@ -2252,11 +2200,17 @@ void __init init_ioapic_mappings(void)
}
if ( !smp_found_config || skip_ioapic_setup || nr_irqs_gsi < 16 )
nr_irqs_gsi = 16;
- else if ( nr_irqs_gsi > PAGE_SIZE * 8 )
+    else if ( nr_irqs_gsi > MAX_GSI_IRQS )
{
/* for PHYSDEVOP_pirq_eoi_gmfn guest assumptions */
- printk(KERN_WARNING "Limiting number of IRQs found (%u) to %lu\n",
- nr_irqs_gsi, PAGE_SIZE * 8);
- nr_irqs_gsi = PAGE_SIZE * 8;
+ printk(KERN_WARNING "Limiting number of GSI IRQs found (%u) to %lu\n",
+ nr_irqs_gsi, MAX_GSI_IRQS);
+ nr_irqs_gsi = MAX_GSI_IRQS;
}
+
+ if (nr_irqs < 2 * nr_irqs_gsi)
+ nr_irqs = 2 * nr_irqs_gsi;
+
+ if (nr_irqs > MAX_NR_IRQS)
+ nr_irqs = MAX_NR_IRQS;
}
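
init_ioapic_mappings() now also sizes the global irq number space: at
least twice the GSI count, capped at MAX_NR_IRQS. The factor of two is a
heuristic floor so that dynamically created irqs (MSI/MSI-X, HPET-MSI)
have at least as much room as the GSIs; the "nr_irqs=" command-line
option can raise it further. Dynamic irqs live above the GSI range, as
the allocator in the irq.c hunk below shows:

    /* From find_unassigned_irq(): dynamic irqs occupy [nr_irqs_gsi, nr_irqs). */
    for ( irq = nr_irqs_gsi; irq < nr_irqs; irq++ )
        if ( irq_status[irq] == IRQ_UNUSED )
            return irq;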
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 1ba0f191a7..e120015b7e 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -27,13 +27,162 @@ int opt_noirqbalance = 0;
boolean_param("noirqbalance", opt_noirqbalance);
unsigned int __read_mostly nr_irqs_gsi = 16;
-irq_desc_t irq_desc[NR_VECTORS];
+unsigned int __read_mostly nr_irqs = 1024;
+integer_param("nr_irqs", nr_irqs);
+
+u8 __read_mostly *irq_vector;
+struct irq_desc __read_mostly *irq_desc = NULL;
+
+int __read_mostly *irq_status = NULL;
+#define IRQ_UNUSED (0)
+#define IRQ_USED (1)
+#define IRQ_RSVD (2)
+
+static struct timer *irq_guest_eoi_timer;
static DEFINE_SPINLOCK(vector_lock);
int vector_irq[NR_VECTORS] __read_mostly = {
[0 ... NR_VECTORS - 1] = FREE_TO_ASSIGN_IRQ
};
+static inline int find_unassigned_irq(void)
+{
+ int irq;
+
+ for (irq = nr_irqs_gsi; irq < nr_irqs; irq++)
+ if (irq_status[irq] == IRQ_UNUSED)
+ return irq;
+ return -ENOSPC;
+}
+
+/*
+ * Dynamic irq allocate and deallocation for MSI
+ */
+int create_irq(void)
+{
+ unsigned long flags;
+ int irq, ret;
+ irq = -ENOSPC;
+
+ spin_lock_irqsave(&vector_lock, flags);
+
+ irq = find_unassigned_irq();
+ if (irq < 0)
+ goto out;
+ ret = __assign_irq_vector(irq);
+ if (ret < 0)
+ irq = ret;
+out:
+ spin_unlock_irqrestore(&vector_lock, flags);
+
+ return irq;
+}
+
+void dynamic_irq_cleanup(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irqaction *action;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ desc->status |= IRQ_DISABLED;
+ desc->handler->shutdown(irq);
+ action = desc->action;
+ desc->action = NULL;
+ desc->depth = 1;
+ desc->msi_desc = NULL;
+ desc->handler = &no_irq_type;
+ cpus_setall(desc->affinity);
+ spin_unlock_irqrestore(&desc->lock, flags);
+
+ /* Wait to make sure it's not being used on another CPU */
+ do { smp_mb(); } while ( desc->status & IRQ_INPROGRESS );
+
+ if (action)
+ xfree(action);
+}
+
+static void __clear_irq_vector(int irq)
+{
+ int vector = irq_vector[irq];
+ vector_irq[vector] = FREE_TO_ASSIGN_IRQ;
+ irq_vector[irq] = 0;
+ irq_status[irq] = IRQ_UNUSED;
+}
+
+void clear_irq_vector(int irq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vector_lock, flags);
+ __clear_irq_vector(irq);
+ spin_unlock_irqrestore(&vector_lock, flags);
+}
+
+void destroy_irq(unsigned int irq)
+{
+ dynamic_irq_cleanup(irq);
+ clear_irq_vector(irq);
+}
+
+int irq_to_vector(int irq)
+{
+ int vector = -1;
+
+ BUG_ON(irq >= nr_irqs || irq < 0);
+
+ if (IO_APIC_IRQ(irq) || MSI_IRQ(irq))
+ vector = irq_vector[irq];
+ else
+ vector = LEGACY_VECTOR(irq);
+
+ return vector;
+}
+
+static void init_one_irq_desc(struct irq_desc *desc)
+{
+ desc->status = IRQ_DISABLED;
+ desc->handler = &no_irq_type;
+ desc->action = NULL;
+ desc->depth = 1;
+ desc->msi_desc = NULL;
+ spin_lock_init(&desc->lock);
+ cpus_setall(desc->affinity);
+}
+
+static void init_one_irq_status(int irq)
+{
+ irq_status[irq] = IRQ_UNUSED;
+}
+
+int init_irq_data(void)
+{
+ struct irq_desc *desc;
+ int irq;
+
+ irq_desc = xmalloc_array(struct irq_desc, nr_irqs);
+ irq_status = xmalloc_array(int, nr_irqs);
+ irq_guest_eoi_timer = xmalloc_array(struct timer, nr_irqs);
+ irq_vector = xmalloc_array(u8, nr_irqs);
+
+    if (!irq_desc || !irq_status || !irq_vector || !irq_guest_eoi_timer)
+ return -1;
+
+ memset(irq_desc, 0, nr_irqs * sizeof(*irq_desc));
+ memset(irq_status, 0, nr_irqs * sizeof(*irq_status));
+ memset(irq_vector, 0, nr_irqs * sizeof(*irq_vector));
+ memset(irq_guest_eoi_timer, 0, nr_irqs * sizeof(*irq_guest_eoi_timer));
+
+ for (irq = 0; irq < nr_irqs; irq++) {
+ desc = irq_to_desc(irq);
+ desc->irq = irq;
+ init_one_irq_desc(desc);
+ init_one_irq_status(irq);
+ }
+
+ return 0;
+}
+
static void __do_IRQ_guest(int vector);
void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs) { }
@@ -41,9 +190,9 @@ void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs) { }
static void enable_none(unsigned int vector) { }
static unsigned int startup_none(unsigned int vector) { return 0; }
static void disable_none(unsigned int vector) { }
-static void ack_none(unsigned int vector)
+static void ack_none(unsigned int irq)
{
- ack_bad_irq(vector);
+ ack_bad_irq(irq);
}
#define shutdown_none disable_none
@@ -61,33 +210,15 @@ struct hw_interrupt_type no_irq_type = {
atomic_t irq_err_count;
-int free_irq_vector(int vector)
-{
- int irq;
-
- BUG_ON((vector > LAST_DYNAMIC_VECTOR) || (vector < FIRST_DYNAMIC_VECTOR));
-
- spin_lock(&vector_lock);
- if ((irq = vector_irq[vector]) == AUTO_ASSIGN_IRQ)
- vector_irq[vector] = FREE_TO_ASSIGN_IRQ;
- spin_unlock(&vector_lock);
-
- return (irq == AUTO_ASSIGN_IRQ) ? 0 : -EINVAL;
-}
-
-int assign_irq_vector(int irq)
+int __assign_irq_vector(int irq)
{
static unsigned current_vector = FIRST_DYNAMIC_VECTOR;
unsigned vector;
- BUG_ON(irq >= nr_irqs_gsi && irq != AUTO_ASSIGN_IRQ);
-
- spin_lock(&vector_lock);
+ BUG_ON(irq >= nr_irqs || irq < 0);
- if ((irq != AUTO_ASSIGN_IRQ) && (irq_to_vector(irq) > 0)) {
- spin_unlock(&vector_lock);
+    if ( irq_to_vector(irq) > 0 )
return irq_to_vector(irq);
- }
vector = current_vector;
while (vector_irq[vector] != FREE_TO_ASSIGN_IRQ) {
@@ -95,40 +226,59 @@ int assign_irq_vector(int irq)
if (vector > LAST_DYNAMIC_VECTOR)
vector = FIRST_DYNAMIC_VECTOR + ((vector + 1) & 7);
- if (vector == current_vector) {
- spin_unlock(&vector_lock);
+ if (vector == current_vector)
return -ENOSPC;
- }
}
current_vector = vector;
vector_irq[vector] = irq;
- if (irq != AUTO_ASSIGN_IRQ)
- IO_APIC_VECTOR(irq) = vector;
-
- spin_unlock(&vector_lock);
+ irq_vector[irq] = vector;
+ irq_status[irq] = IRQ_USED;
return vector;
}
+int assign_irq_vector(int irq)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vector_lock, flags);
+ ret = __assign_irq_vector(irq);
+ spin_unlock_irqrestore(&vector_lock, flags);
+
+ return ret;
+}
+
+
asmlinkage void do_IRQ(struct cpu_user_regs *regs)
{
- unsigned int vector = regs->entry_vector;
- irq_desc_t *desc = &irq_desc[vector];
struct irqaction *action;
uint32_t tsc_in;
-
+ unsigned int vector = regs->entry_vector;
+ int irq = vector_irq[vector];
+ struct irq_desc *desc;
+
perfc_incr(irqs);
+ if (irq < 0) {
+ ack_APIC_irq();
+ printk("%s: %d.%d No irq handler for vector (irq %d)\n",
+ __func__, smp_processor_id(), vector, irq);
+ return;
+ }
+
+ desc = irq_to_desc(irq);
+
spin_lock(&desc->lock);
- desc->handler->ack(vector);
+ desc->handler->ack(irq);
if ( likely(desc->status & IRQ_GUEST) )
{
irq_enter();
tsc_in = tb_init_done ? get_cycles() : 0;
- __do_IRQ_guest(vector);
- TRACE_3D(TRC_TRACE_IRQ, vector, tsc_in, get_cycles());
+ __do_IRQ_guest(irq);
+ TRACE_3D(TRC_TRACE_IRQ, irq, tsc_in, get_cycles());
irq_exit();
spin_unlock(&desc->lock);
return;
@@ -153,8 +303,8 @@ asmlinkage void do_IRQ(struct cpu_user_regs *regs)
irq_enter();
spin_unlock_irq(&desc->lock);
tsc_in = tb_init_done ? get_cycles() : 0;
- action->handler(vector_to_irq(vector), action->dev_id, regs);
- TRACE_3D(TRC_TRACE_IRQ, vector, tsc_in, get_cycles());
+ action->handler(irq, action->dev_id, regs);
+ TRACE_3D(TRC_TRACE_IRQ, irq, tsc_in, get_cycles());
spin_lock_irq(&desc->lock);
irq_exit();
}
@@ -162,11 +312,11 @@ asmlinkage void do_IRQ(struct cpu_user_regs *regs)
desc->status &= ~IRQ_INPROGRESS;
out:
- desc->handler->end(vector);
+ desc->handler->end(irq);
spin_unlock(&desc->lock);
}
-int request_irq_vector(unsigned int vector,
+int request_irq(unsigned int irq,
void (*handler)(int, void *, struct cpu_user_regs *),
unsigned long irqflags, const char * devname, void *dev_id)
{
@@ -179,7 +329,7 @@ int request_irq_vector(unsigned int vector,
* which interrupt is which (messes up the interrupt freeing
* logic etc).
*/
- if (vector >= NR_VECTORS)
+ if (irq >= nr_irqs)
return -EINVAL;
if (!handler)
return -EINVAL;
@@ -192,33 +342,42 @@ int request_irq_vector(unsigned int vector,
action->name = devname;
action->dev_id = dev_id;
- retval = setup_irq_vector(vector, action);
+ retval = setup_irq(irq, action);
if (retval)
xfree(action);
return retval;
}
-void release_irq_vector(unsigned int vector)
+void release_irq(unsigned int irq)
{
- irq_desc_t *desc = &irq_desc[vector];
+ struct irq_desc *desc;
unsigned long flags;
+ struct irqaction *action;
+
+ desc = irq_to_desc(irq);
spin_lock_irqsave(&desc->lock,flags);
+ action = desc->action;
desc->action = NULL;
desc->depth = 1;
desc->status |= IRQ_DISABLED;
- desc->handler->shutdown(vector);
+ desc->handler->shutdown(irq);
spin_unlock_irqrestore(&desc->lock,flags);
/* Wait to make sure it's not being used on another CPU */
do { smp_mb(); } while ( desc->status & IRQ_INPROGRESS );
+
+ if (action)
+ xfree(action);
}
-int setup_irq_vector(unsigned int vector, struct irqaction *new)
+int setup_irq(unsigned int irq, struct irqaction *new)
{
- irq_desc_t *desc = &irq_desc[vector];
+ struct irq_desc *desc;
unsigned long flags;
+
+ desc = irq_to_desc(irq);
spin_lock_irqsave(&desc->lock,flags);
@@ -231,7 +390,7 @@ int setup_irq_vector(unsigned int vector, struct irqaction *new)
desc->action = new;
desc->depth = 0;
desc->status &= ~IRQ_DISABLED;
- desc->handler->startup(vector);
+ desc->handler->startup(irq);
spin_unlock_irqrestore(&desc->lock,flags);
@@ -261,9 +420,10 @@ typedef struct {
* order, as only the current highest-priority pending irq can be EOIed.
*/
struct pending_eoi {
- u8 vector; /* Vector awaiting EOI */
+ u8 vector; /* vector awaiting EOI */
u8 ready; /* Ready for EOI now? */
};
+
static DEFINE_PER_CPU(struct pending_eoi, pending_eoi[NR_VECTORS]);
#define pending_eoi_sp(p) ((p)[NR_VECTORS-1].vector)
@@ -279,26 +439,25 @@ static inline void clear_pirq_eoi(struct domain *d, unsigned int irq)
clear_bit(irq, d->arch.pirq_eoi_map);
}
-static void _irq_guest_eoi(irq_desc_t *desc)
+static void _irq_guest_eoi(struct irq_desc *desc)
{
irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
- unsigned int i, vector = desc - irq_desc;
+ unsigned int i, irq = desc - irq_desc;
if ( !(desc->status & IRQ_GUEST_EOI_PENDING) )
return;
for ( i = 0; i < action->nr_guests; ++i )
clear_pirq_eoi(action->guest[i],
- domain_vector_to_irq(action->guest[i], vector));
+ domain_irq_to_pirq(action->guest[i], irq));
desc->status &= ~(IRQ_INPROGRESS|IRQ_GUEST_EOI_PENDING);
- desc->handler->enable(vector);
+ desc->handler->enable(irq);
}
-static struct timer irq_guest_eoi_timer[NR_VECTORS];
static void irq_guest_eoi_timer_fn(void *data)
{
- irq_desc_t *desc = data;
+ struct irq_desc *desc = data;
unsigned long flags;
spin_lock_irqsave(&desc->lock, flags);
@@ -306,20 +465,21 @@ static void irq_guest_eoi_timer_fn(void *data)
spin_unlock_irqrestore(&desc->lock, flags);
}
-static void __do_IRQ_guest(int vector)
+static void __do_IRQ_guest(int irq)
{
- irq_desc_t *desc = &irq_desc[vector];
+ struct irq_desc *desc = irq_to_desc(irq);
irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
struct domain *d;
int i, sp, already_pending = 0;
struct pending_eoi *peoi = this_cpu(pending_eoi);
+ int vector = irq_to_vector(irq);
if ( unlikely(action->nr_guests == 0) )
{
/* An interrupt may slip through while freeing an ACKTYPE_EOI irq. */
ASSERT(action->ack_type == ACKTYPE_EOI);
ASSERT(desc->status & IRQ_DISABLED);
- desc->handler->end(vector);
+ desc->handler->end(irq);
return;
}
@@ -336,13 +496,13 @@ static void __do_IRQ_guest(int vector)
for ( i = 0; i < action->nr_guests; i++ )
{
- unsigned int irq;
+ unsigned int pirq;
d = action->guest[i];
- irq = domain_vector_to_irq(d, vector);
+ pirq = domain_irq_to_pirq(d, irq);
if ( (action->ack_type != ACKTYPE_NONE) &&
- !test_and_set_bit(irq, d->pirq_mask) )
+ !test_and_set_bit(pirq, d->pirq_mask) )
action->in_flight++;
- if ( hvm_do_IRQ_dpci(d, irq) )
+ if ( hvm_do_IRQ_dpci(d, pirq) )
{
if ( action->ack_type == ACKTYPE_NONE )
{
@@ -350,7 +510,7 @@ static void __do_IRQ_guest(int vector)
desc->status |= IRQ_INPROGRESS; /* cleared during hvm eoi */
}
}
- else if ( send_guest_pirq(d, irq) &&
+ else if ( send_guest_pirq(d, pirq) &&
(action->ack_type == ACKTYPE_NONE) )
{
already_pending++;
@@ -359,13 +519,13 @@ static void __do_IRQ_guest(int vector)
if ( already_pending == action->nr_guests )
{
- stop_timer(&irq_guest_eoi_timer[vector]);
- desc->handler->disable(vector);
+ stop_timer(&irq_guest_eoi_timer[irq]);
+ desc->handler->disable(irq);
desc->status |= IRQ_GUEST_EOI_PENDING;
for ( i = 0; i < already_pending; ++i )
{
d = action->guest[i];
- set_pirq_eoi(d, domain_vector_to_irq(d, vector));
+ set_pirq_eoi(d, domain_irq_to_pirq(d, irq));
/*
* Could check here whether the guest unmasked the event by now
* (or perhaps just re-issue the send_guest_pirq()), and if it
@@ -375,9 +535,9 @@ static void __do_IRQ_guest(int vector)
* - skip the timer setup below.
*/
}
- init_timer(&irq_guest_eoi_timer[vector],
+ init_timer(&irq_guest_eoi_timer[irq],
irq_guest_eoi_timer_fn, desc, smp_processor_id());
- set_timer(&irq_guest_eoi_timer[vector], NOW() + MILLISECS(1));
+ set_timer(&irq_guest_eoi_timer[irq], NOW() + MILLISECS(1));
}
}
@@ -386,21 +546,21 @@ static void __do_IRQ_guest(int vector)
* The descriptor is returned locked. This function is safe against changes
* to the per-domain irq-to-vector mapping.
*/
-irq_desc_t *domain_spin_lock_irq_desc(
- struct domain *d, int irq, unsigned long *pflags)
+struct irq_desc *domain_spin_lock_irq_desc(
+ struct domain *d, int pirq, unsigned long *pflags)
{
- unsigned int vector;
+ unsigned int irq;
unsigned long flags;
- irq_desc_t *desc;
+ struct irq_desc *desc;
for ( ; ; )
{
- vector = domain_irq_to_vector(d, irq);
- if ( vector <= 0 )
+ irq = domain_pirq_to_irq(d, pirq);
+ if ( irq <= 0 )
return NULL;
- desc = &irq_desc[vector];
+ desc = irq_to_desc(irq);
spin_lock_irqsave(&desc->lock, flags);
- if ( vector == domain_irq_to_vector(d, irq) )
+ if ( irq == domain_pirq_to_irq(d, pirq) )
break;
spin_unlock_irqrestore(&desc->lock, flags);
}
@@ -414,8 +574,8 @@ irq_desc_t *domain_spin_lock_irq_desc(
static void flush_ready_eoi(void)
{
struct pending_eoi *peoi = this_cpu(pending_eoi);
- irq_desc_t *desc;
- int vector, sp;
+ struct irq_desc *desc;
+ int irq, sp;
ASSERT(!local_irq_is_enabled());
@@ -423,23 +583,23 @@ static void flush_ready_eoi(void)
while ( (--sp >= 0) && peoi[sp].ready )
{
- vector = peoi[sp].vector;
- desc = &irq_desc[vector];
+ irq = vector_irq[peoi[sp].vector];
+ desc = irq_to_desc(irq);
spin_lock(&desc->lock);
- desc->handler->end(vector);
+ desc->handler->end(irq);
spin_unlock(&desc->lock);
}
pending_eoi_sp(peoi) = sp+1;
}
-static void __set_eoi_ready(irq_desc_t *desc)
+static void __set_eoi_ready(struct irq_desc *desc)
{
irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
struct pending_eoi *peoi = this_cpu(pending_eoi);
- int vector, sp;
+ int irq, sp;
- vector = desc - irq_desc;
+ irq = desc - irq_desc;
if ( !(desc->status & IRQ_GUEST) ||
(action->in_flight != 0) ||
@@ -449,7 +609,7 @@ static void __set_eoi_ready(irq_desc_t *desc)
sp = pending_eoi_sp(peoi);
do {
ASSERT(sp > 0);
- } while ( peoi[--sp].vector != vector );
+ } while ( peoi[--sp].vector != irq_to_vector(irq) );
ASSERT(!peoi[sp].ready);
peoi[sp].ready = 1;
}
@@ -457,7 +617,7 @@ static void __set_eoi_ready(irq_desc_t *desc)
/* Mark specified IRQ as ready-for-EOI (if it really is) and attempt to EOI. */
static void set_eoi_ready(void *data)
{
- irq_desc_t *desc = data;
+ struct irq_desc *desc = data;
ASSERT(!local_irq_is_enabled());
@@ -468,29 +628,29 @@ static void set_eoi_ready(void *data)
flush_ready_eoi();
}
-static void __pirq_guest_eoi(struct domain *d, int irq)
+static void __pirq_guest_eoi(struct domain *d, int pirq)
{
- irq_desc_t *desc;
+ struct irq_desc *desc;
irq_guest_action_t *action;
cpumask_t cpu_eoi_map;
- int vector;
+ int irq;
ASSERT(local_irq_is_enabled());
- desc = domain_spin_lock_irq_desc(d, irq, NULL);
+ desc = domain_spin_lock_irq_desc(d, pirq, NULL);
if ( desc == NULL )
return;
action = (irq_guest_action_t *)desc->action;
- vector = desc - irq_desc;
+ irq = desc - irq_desc;
if ( action->ack_type == ACKTYPE_NONE )
{
- ASSERT(!test_bit(irq, d->pirq_mask));
- stop_timer(&irq_guest_eoi_timer[vector]);
+ ASSERT(!test_bit(pirq, d->pirq_mask));
+ stop_timer(&irq_guest_eoi_timer[irq]);
_irq_guest_eoi(desc);
}
- if ( unlikely(!test_and_clear_bit(irq, d->pirq_mask)) ||
+ if ( unlikely(!test_and_clear_bit(pirq, d->pirq_mask)) ||
unlikely(--action->in_flight != 0) )
{
spin_unlock_irq(&desc->lock);
@@ -500,7 +660,7 @@ static void __pirq_guest_eoi(struct domain *d, int irq)
if ( action->ack_type == ACKTYPE_UNMASK )
{
ASSERT(cpus_empty(action->cpu_eoi_map));
- desc->handler->end(vector);
+ desc->handler->end(irq);
spin_unlock_irq(&desc->lock);
return;
}
@@ -527,7 +687,7 @@ static void __pirq_guest_eoi(struct domain *d, int irq)
int pirq_guest_eoi(struct domain *d, int irq)
{
if ( (irq < 0) || (irq >= d->nr_pirqs) )
return -EINVAL;
__pirq_guest_eoi(d, irq);
@@ -551,16 +711,16 @@ int pirq_guest_unmask(struct domain *d)
}
extern int ioapic_ack_new;
-static int pirq_acktype(struct domain *d, int irq)
+static int pirq_acktype(struct domain *d, int pirq)
{
- irq_desc_t *desc;
- unsigned int vector;
+ struct irq_desc *desc;
+ unsigned int irq;
- vector = domain_irq_to_vector(d, irq);
- if ( vector <= 0 )
+ irq = domain_pirq_to_irq(d, pirq);
+ if ( irq <= 0 )
return ACKTYPE_NONE;
- desc = &irq_desc[vector];
+ desc = irq_to_desc(irq);
if ( desc->handler == &no_irq_type )
return ACKTYPE_NONE;
@@ -597,14 +757,14 @@ static int pirq_acktype(struct domain *d, int irq)
return 0;
}
-int pirq_shared(struct domain *d, int irq)
+int pirq_shared(struct domain *d, int pirq)
{
- irq_desc_t *desc;
+ struct irq_desc *desc;
irq_guest_action_t *action;
unsigned long flags;
int shared;
- desc = domain_spin_lock_irq_desc(d, irq, &flags);
+ desc = domain_spin_lock_irq_desc(d, pirq, &flags);
if ( desc == NULL )
return 0;
@@ -616,10 +776,10 @@ int pirq_shared(struct domain *d, int irq)
return shared;
}
-int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
+int pirq_guest_bind(struct vcpu *v, int pirq, int will_share)
{
- unsigned int vector;
- irq_desc_t *desc;
+ unsigned int irq;
+ struct irq_desc *desc;
irq_guest_action_t *action, *newaction = NULL;
int rc = 0;
cpumask_t cpumask = CPU_MASK_NONE;
@@ -628,7 +788,7 @@ int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
BUG_ON(!local_irq_is_enabled());
retry:
- desc = domain_spin_lock_irq_desc(v->domain, irq, NULL);
+ desc = domain_spin_lock_irq_desc(v->domain, pirq, NULL);
if ( desc == NULL )
{
rc = -EINVAL;
@@ -636,7 +796,7 @@ int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
}
action = (irq_guest_action_t *)desc->action;
- vector = desc - irq_desc;
+ irq = desc - irq_desc;
if ( !(desc->status & IRQ_GUEST) )
{
@@ -644,7 +804,7 @@ int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
{
gdprintk(XENLOG_INFO,
"Cannot bind IRQ %d to guest. In use by '%s'.\n",
- irq, desc->action->name);
+ pirq, desc->action->name);
rc = -EBUSY;
goto unlock_out;
}
@@ -656,7 +816,7 @@ int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
goto retry;
gdprintk(XENLOG_INFO,
"Cannot bind IRQ %d to guest. Out of memory.\n",
- irq);
+ pirq);
rc = -ENOMEM;
goto out;
}
@@ -668,23 +828,23 @@ int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
action->nr_guests = 0;
action->in_flight = 0;
action->shareable = will_share;
- action->ack_type = pirq_acktype(v->domain, irq);
+ action->ack_type = pirq_acktype(v->domain, pirq);
cpus_clear(action->cpu_eoi_map);
desc->depth = 0;
desc->status |= IRQ_GUEST;
desc->status &= ~IRQ_DISABLED;
- desc->handler->startup(vector);
+ desc->handler->startup(irq);
/* Attempt to bind the interrupt target to the correct CPU. */
cpu_set(v->processor, cpumask);
if ( !opt_noirqbalance && (desc->handler->set_affinity != NULL) )
- desc->handler->set_affinity(vector, cpumask);
+ desc->handler->set_affinity(irq, cpumask);
}
else if ( !will_share || !action->shareable )
{
gdprintk(XENLOG_INFO, "Cannot bind IRQ %d to guest. %s.\n",
- irq,
+ pirq,
will_share ?
"Others do not share" :
"Will not share with others");
@@ -707,7 +867,7 @@ int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
if ( action->nr_guests == IRQ_MAX_GUESTS )
{
gdprintk(XENLOG_INFO, "Cannot bind IRQ %d to guest. "
- "Already at max share.\n", irq);
+ "Already at max share.\n", pirq);
rc = -EBUSY;
goto unlock_out;
}
@@ -715,9 +875,9 @@ int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
action->guest[action->nr_guests++] = v->domain;
if ( action->ack_type != ACKTYPE_NONE )
- set_pirq_eoi(v->domain, irq);
+ set_pirq_eoi(v->domain, pirq);
else
- clear_pirq_eoi(v->domain, irq);
+ clear_pirq_eoi(v->domain, pirq);
unlock_out:
spin_unlock_irq(&desc->lock);
@@ -728,9 +888,9 @@ int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
}
static irq_guest_action_t *__pirq_guest_unbind(
- struct domain *d, int irq, irq_desc_t *desc)
+ struct domain *d, int pirq, struct irq_desc *desc)
{
- unsigned int vector;
+ unsigned int irq;
irq_guest_action_t *action;
cpumask_t cpu_eoi_map;
int i;
@@ -738,7 +898,7 @@ static irq_guest_action_t *__pirq_guest_unbind(
BUG_ON(!(desc->status & IRQ_GUEST));
action = (irq_guest_action_t *)desc->action;
- vector = desc - irq_desc;
+ irq = desc - irq_desc;
for ( i = 0; (i < action->nr_guests) && (action->guest[i] != d); i++ )
continue;
@@ -749,13 +909,13 @@ static irq_guest_action_t *__pirq_guest_unbind(
switch ( action->ack_type )
{
case ACKTYPE_UNMASK:
- if ( test_and_clear_bit(irq, d->pirq_mask) &&
+ if ( test_and_clear_bit(pirq, d->pirq_mask) &&
(--action->in_flight == 0) )
- desc->handler->end(vector);
+ desc->handler->end(irq);
break;
case ACKTYPE_EOI:
/* NB. If #guests == 0 then we clear the eoi_map later on. */
- if ( test_and_clear_bit(irq, d->pirq_mask) &&
+ if ( test_and_clear_bit(pirq, d->pirq_mask) &&
(--action->in_flight == 0) &&
(action->nr_guests != 0) )
{
@@ -766,7 +926,7 @@ static irq_guest_action_t *__pirq_guest_unbind(
}
break;
case ACKTYPE_NONE:
- stop_timer(&irq_guest_eoi_timer[vector]);
+ stop_timer(&irq_guest_eoi_timer[irq]);
_irq_guest_eoi(desc);
break;
}
@@ -775,7 +935,7 @@ static irq_guest_action_t *__pirq_guest_unbind(
* The guest cannot re-bind to this IRQ until this function returns. So,
* when we have flushed this IRQ from pirq_mask, it should remain flushed.
*/
- BUG_ON(test_bit(irq, d->pirq_mask));
+ BUG_ON(test_bit(pirq, d->pirq_mask));
if ( action->nr_guests != 0 )
return NULL;
@@ -785,7 +945,7 @@ static irq_guest_action_t *__pirq_guest_unbind(
/* Disabling IRQ before releasing the desc_lock avoids an IRQ storm. */
desc->depth = 1;
desc->status |= IRQ_DISABLED;
- desc->handler->disable(vector);
+ desc->handler->disable(irq);
/*
* Mark any remaining pending EOIs as ready to flush.
@@ -808,35 +968,35 @@ static irq_guest_action_t *__pirq_guest_unbind(
desc->action = NULL;
desc->status &= ~IRQ_GUEST;
desc->status &= ~IRQ_INPROGRESS;
- kill_timer(&irq_guest_eoi_timer[vector]);
- desc->handler->shutdown(vector);
+ kill_timer(&irq_guest_eoi_timer[irq]);
+ desc->handler->shutdown(irq);
/* Caller frees the old guest descriptor block. */
return action;
}
-void pirq_guest_unbind(struct domain *d, int irq)
+void pirq_guest_unbind(struct domain *d, int pirq)
{
irq_guest_action_t *oldaction = NULL;
- irq_desc_t *desc;
- int vector;
+ struct irq_desc *desc;
+ int irq;
WARN_ON(!spin_is_locked(&d->event_lock));
BUG_ON(!local_irq_is_enabled());
- desc = domain_spin_lock_irq_desc(d, irq, NULL);
+ desc = domain_spin_lock_irq_desc(d, pirq, NULL);
if ( desc == NULL )
{
- vector = -domain_irq_to_vector(d, irq);
- BUG_ON(vector <= 0);
- desc = &irq_desc[vector];
+ irq = -domain_pirq_to_irq(d, pirq);
+ BUG_ON(irq <= 0);
+ desc = irq_to_desc(irq);
spin_lock_irq(&desc->lock);
- d->arch.pirq_vector[irq] = d->arch.vector_pirq[vector] = 0;
+ d->arch.pirq_irq[pirq] = d->arch.irq_pirq[irq] = 0;
}
else
{
- oldaction = __pirq_guest_unbind(d, irq, desc);
+ oldaction = __pirq_guest_unbind(d, pirq, desc);
}
spin_unlock_irq(&desc->lock);
@@ -847,7 +1007,7 @@ void pirq_guest_unbind(struct domain *d, int irq)
static int pirq_guest_force_unbind(struct domain *d, int irq)
{
- irq_desc_t *desc;
+ struct irq_desc *desc;
irq_guest_action_t *action, *oldaction = NULL;
int i, bound = 0;
@@ -887,7 +1047,7 @@ int get_free_pirq(struct domain *d, int type, int index)
if ( type == MAP_PIRQ_TYPE_GSI )
{
for ( i = 16; i < nr_irqs_gsi; i++ )
- if ( !d->arch.pirq_vector[i] )
+ if ( !d->arch.pirq_irq[i] )
break;
if ( i == nr_irqs_gsi )
return -ENOSPC;
@@ -895,7 +1055,7 @@ int get_free_pirq(struct domain *d, int type, int index)
else
{
for ( i = d->nr_pirqs - 1; i >= 16; i-- )
- if ( !d->arch.pirq_vector[i] )
+ if ( !d->arch.pirq_irq[i] )
break;
if ( i == 16 )
return -ENOSPC;
@@ -905,11 +1065,11 @@ int get_free_pirq(struct domain *d, int type, int index)
}
int map_domain_pirq(
- struct domain *d, int pirq, int vector, int type, void *data)
+ struct domain *d, int pirq, int irq, int type, void *data)
{
int ret = 0;
- int old_vector, old_pirq;
- irq_desc_t *desc;
+ int old_irq, old_pirq;
+ struct irq_desc *desc;
unsigned long flags;
struct msi_desc *msi_desc;
struct pci_dev *pdev = NULL;
@@ -920,21 +1080,21 @@ int map_domain_pirq(
if ( !IS_PRIV(current->domain) )
return -EPERM;
- if ( pirq < 0 || pirq >= d->nr_pirqs || vector < 0 || vector >= NR_VECTORS )
+ if ( pirq < 0 || pirq >= d->nr_pirqs || irq < 0 || irq >= nr_irqs )
{
- dprintk(XENLOG_G_ERR, "dom%d: invalid pirq %d or vector %d\n",
- d->domain_id, pirq, vector);
+ dprintk(XENLOG_G_ERR, "dom%d: invalid pirq %d or irq %d\n",
+ d->domain_id, pirq, irq);
return -EINVAL;
}
- old_vector = domain_irq_to_vector(d, pirq);
- old_pirq = domain_vector_to_irq(d, vector);
+ old_irq = domain_pirq_to_irq(d, pirq);
+ old_pirq = domain_irq_to_pirq(d, irq);
- if ( (old_vector && (old_vector != vector) ) ||
+ if ( (old_irq && (old_irq != irq) ) ||
(old_pirq && (old_pirq != pirq)) )
{
- dprintk(XENLOG_G_ERR, "dom%d: pirq %d or vector %d already mapped\n",
- d->domain_id, pirq, vector);
+ dprintk(XENLOG_G_ERR, "dom%d: pirq %d or irq %d already mapped\n",
+ d->domain_id, pirq, irq);
return -EINVAL;
}
@@ -946,7 +1106,7 @@ int map_domain_pirq(
return ret;
}
- desc = &irq_desc[vector];
+ desc = irq_to_desc(irq);
if ( type == MAP_PIRQ_TYPE_MSI )
{
@@ -964,18 +1124,18 @@ int map_domain_pirq(
spin_lock_irqsave(&desc->lock, flags);
if ( desc->handler != &no_irq_type )
- dprintk(XENLOG_G_ERR, "dom%d: vector %d in use\n",
- d->domain_id, vector);
+ dprintk(XENLOG_G_ERR, "dom%d: irq %d in use\n",
+ d->domain_id, irq);
desc->handler = &pci_msi_type;
- d->arch.pirq_vector[pirq] = vector;
- d->arch.vector_pirq[vector] = pirq;
- setup_msi_irq(pdev, msi_desc);
+ d->arch.pirq_irq[pirq] = irq;
+ d->arch.irq_pirq[irq] = pirq;
+ setup_msi_irq(pdev, msi_desc, irq);
spin_unlock_irqrestore(&desc->lock, flags);
} else
{
spin_lock_irqsave(&desc->lock, flags);
- d->arch.pirq_vector[pirq] = vector;
- d->arch.vector_pirq[vector] = pirq;
+ d->arch.pirq_irq[pirq] = irq;
+ d->arch.irq_pirq[irq] = pirq;
spin_unlock_irqrestore(&desc->lock, flags);
}
@@ -987,8 +1147,8 @@ int map_domain_pirq(
int unmap_domain_pirq(struct domain *d, int pirq)
{
unsigned long flags;
- irq_desc_t *desc;
- int vector, ret = 0;
+ struct irq_desc *desc;
+ int irq, ret = 0;
bool_t forced_unbind;
struct msi_desc *msi_desc = NULL;
@@ -1001,8 +1161,8 @@ int unmap_domain_pirq(struct domain *d, int pirq)
ASSERT(spin_is_locked(&pcidevs_lock));
ASSERT(spin_is_locked(&d->event_lock));
- vector = domain_irq_to_vector(d, pirq);
- if ( vector <= 0 )
+ irq = domain_pirq_to_irq(d, pirq);
+ if ( irq <= 0 )
{
dprintk(XENLOG_G_ERR, "dom%d: pirq %d not mapped\n",
d->domain_id, pirq);
@@ -1015,44 +1175,41 @@ int unmap_domain_pirq(struct domain *d, int pirq)
dprintk(XENLOG_G_WARNING, "dom%d: forcing unbind of pirq %d\n",
d->domain_id, pirq);
- desc = &irq_desc[vector];
+ desc = irq_to_desc(irq);
if ( (msi_desc = desc->msi_desc) != NULL )
pci_disable_msi(msi_desc);
spin_lock_irqsave(&desc->lock, flags);
- BUG_ON(vector != domain_irq_to_vector(d, pirq));
+ BUG_ON(irq != domain_pirq_to_irq(d, pirq));
if ( msi_desc )
- teardown_msi_vector(vector);
-
- if ( desc->handler == &pci_msi_type )
- desc->handler = &no_irq_type;
+ teardown_msi_irq(irq);
if ( !forced_unbind )
{
- d->arch.pirq_vector[pirq] = 0;
- d->arch.vector_pirq[vector] = 0;
+ d->arch.pirq_irq[pirq] = 0;
+ d->arch.irq_pirq[irq] = 0;
}
else
{
- d->arch.pirq_vector[pirq] = -vector;
- d->arch.vector_pirq[vector] = -pirq;
+ d->arch.pirq_irq[pirq] = -irq;
+ d->arch.irq_pirq[irq] = -pirq;
}
spin_unlock_irqrestore(&desc->lock, flags);
if (msi_desc)
- {
- msi_free_vector(msi_desc);
- free_irq_vector(vector);
- }
+ msi_free_irq(msi_desc);
ret = irq_deny_access(d, pirq);
if ( ret )
dprintk(XENLOG_G_ERR, "dom%d: could not deny access to irq %d\n",
d->domain_id, pirq);
+ if ( desc->handler == &pci_msi_type )
+ desc->handler = &no_irq_type;
+
done:
return ret;
}
@@ -1065,7 +1222,7 @@ void free_domain_pirqs(struct domain *d)
spin_lock(&d->event_lock);
for ( i = 0; i < d->nr_pirqs; i++ )
- if ( d->arch.pirq_vector[i] > 0 )
+ if ( d->arch.pirq_irq[i] > 0 )
unmap_domain_pirq(d, i);
spin_unlock(&d->event_lock);
@@ -1077,7 +1234,7 @@ extern void dump_ioapic_irq_info(void);
static void dump_irqs(unsigned char key)
{
int i, glob_irq, irq, vector;
- irq_desc_t *desc;
+ struct irq_desc *desc;
irq_guest_action_t *action;
struct domain *d;
unsigned long flags;
@@ -1088,8 +1245,10 @@ static void dump_irqs(unsigned char key)
{
glob_irq = vector_to_irq(vector);
+ if (glob_irq < 0)
+ continue;
- desc = &irq_desc[vector];
+ desc = irq_to_desc(glob_irq);
if ( desc == NULL || desc->handler == &no_irq_type )
continue;
@@ -1111,7 +1270,7 @@ static void dump_irqs(unsigned char key)
for ( i = 0; i < action->nr_guests; i++ )
{
d = action->guest[i];
- irq = domain_vector_to_irq(d, vector);
+ irq = domain_irq_to_pirq(d, vector_irq[vector]);
printk("%u:%3d(%c%c%c%c)",
d->domain_id, irq,
(test_bit(d->pirq_to_evtchn[glob_irq],
@@ -1172,7 +1331,9 @@ void fixup_irqs(cpumask_t map)
if ( vector_to_irq(vector) == 2 )
continue;
-        desc = &irq_desc[vector];
+        if ( vector_to_irq(vector) < 0 )
+            continue;
+        desc = irq_to_desc(vector_to_irq(vector));
spin_lock_irqsave(&desc->lock, flags);
@@ -1200,9 +1359,10 @@ void fixup_irqs(cpumask_t map)
/* Clean up cpu_eoi_map of every interrupt to exclude this CPU. */
for ( vector = 0; vector < NR_VECTORS; vector++ )
{
-        if ( !(irq_desc[vector].status & IRQ_GUEST) )
+        if ( (vector_to_irq(vector) < 0) ||
+             !(irq_desc[vector_to_irq(vector)].status & IRQ_GUEST) )
continue;
-        action = (irq_guest_action_t *)irq_desc[vector].action;
+        action = (irq_guest_action_t *)irq_desc[vector_to_irq(vector)].action;
cpu_clear(smp_processor_id(), action->cpu_eoi_map);
}
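
Taken together, create_irq()/destroy_irq() and the map/unmap changes give
MSI a self-contained irq lifecycle. A sketch of the caller's view, on the
assumption that xen/arch/x86/physdev.c (whose hunks are not shown here)
drives it roughly like this; msi_info is a hypothetical placeholder for
the opaque data argument:

    /* Assumed caller flow for an MSI interrupt after this patch. */
    int irq = create_irq();            /* reserve an irq and assign a vector */
    if ( irq < 0 )
        return irq;

    ret = map_domain_pirq(d, pirq, irq, MAP_PIRQ_TYPE_MSI, &msi_info);
    if ( ret )
        destroy_irq(irq);              /* nothing else references it yet */

    /* ... on teardown ... */
    unmap_domain_pirq(d, pirq);        /* teardown_msi_irq(), then
                                        * msi_free_irq() -> destroy_irq() */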
diff --git a/xen/arch/x86/msi.c b/xen/arch/x86/msi.c
index 3bd8fa90d4..d93a0c5bb2 100644
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -116,11 +116,12 @@ static void msix_put_fixmap(struct pci_dev *dev, int idx)
/*
* MSI message composition
*/
-void msi_compose_msg(struct pci_dev *pdev, int vector,
+void msi_compose_msg(struct pci_dev *pdev, int irq,
struct msi_msg *msg)
{
unsigned dest;
cpumask_t tmp;
+ int vector = irq_to_vector(irq);
tmp = TARGET_CPUS;
if ( vector )
@@ -195,31 +196,31 @@ static void read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
iommu_read_msi_from_ire(entry, msg);
}
-static int set_vector_msi(struct msi_desc *entry)
+static int set_irq_msi(struct msi_desc *entry)
{
- if ( entry->vector >= NR_VECTORS )
+ if ( entry->irq >= nr_irqs )
{
- dprintk(XENLOG_ERR, "Trying to install msi data for Vector %d\n",
- entry->vector);
+ dprintk(XENLOG_ERR, "Trying to install msi data for irq %d\n",
+ entry->irq);
return -EINVAL;
}
- irq_desc[entry->vector].msi_desc = entry;
+ irq_desc[entry->irq].msi_desc = entry;
return 0;
}
-static int unset_vector_msi(int vector)
+static int unset_irq_msi(int irq)
{
- ASSERT(spin_is_locked(&irq_desc[vector].lock));
+ ASSERT(spin_is_locked(&irq_desc[irq].lock));
- if ( vector >= NR_VECTORS )
+ if ( irq >= nr_irqs )
{
- dprintk(XENLOG_ERR, "Trying to uninstall msi data for Vector %d\n",
- vector);
+ dprintk(XENLOG_ERR, "Trying to uninstall msi data for irq %d\n",
+ irq);
return -EINVAL;
}
- irq_desc[vector].msi_desc = NULL;
+ irq_desc[irq].msi_desc = NULL;
return 0;
}
@@ -271,9 +272,9 @@ static void write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
entry->msg = *msg;
}
-void set_msi_affinity(unsigned int vector, cpumask_t mask)
+void set_msi_affinity(unsigned int irq, cpumask_t mask)
{
- struct msi_desc *desc = irq_desc[vector].msi_desc;
+ struct msi_desc *desc = irq_desc[irq].msi_desc;
struct msi_msg msg;
unsigned int dest;
@@ -286,7 +287,7 @@ void set_msi_affinity(unsigned int vector, cpumask_t mask)
if ( !desc )
return;
- ASSERT(spin_is_locked(&irq_desc[vector].lock));
+ ASSERT(spin_is_locked(&irq_desc[irq].lock));
read_msi_msg(desc, &msg);
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
@@ -333,9 +334,9 @@ static void msix_set_enable(struct pci_dev *dev, int enable)
}
}
-static void msix_flush_writes(unsigned int vector)
+static void msix_flush_writes(unsigned int irq)
{
- struct msi_desc *entry = irq_desc[vector].msi_desc;
+ struct msi_desc *entry = irq_desc[irq].msi_desc;
BUG_ON(!entry || !entry->dev);
switch (entry->msi_attrib.type) {
@@ -361,11 +362,11 @@ int msi_maskable_irq(const struct msi_desc *entry)
|| entry->msi_attrib.maskbit;
}
-static void msi_set_mask_bit(unsigned int vector, int flag)
+static void msi_set_mask_bit(unsigned int irq, int flag)
{
- struct msi_desc *entry = irq_desc[vector].msi_desc;
+ struct msi_desc *entry = irq_desc[irq].msi_desc;
- ASSERT(spin_is_locked(&irq_desc[vector].lock));
+ ASSERT(spin_is_locked(&irq_desc[irq].lock));
BUG_ON(!entry || !entry->dev);
switch (entry->msi_attrib.type) {
case PCI_CAP_ID_MSI:
@@ -397,16 +398,16 @@ static void msi_set_mask_bit(unsigned int vector, int flag)
entry->msi_attrib.masked = !!flag;
}
-void mask_msi_vector(unsigned int vector)
+void mask_msi_irq(unsigned int irq)
{
- msi_set_mask_bit(vector, 1);
- msix_flush_writes(vector);
+ msi_set_mask_bit(irq, 1);
+ msix_flush_writes(irq);
}
-void unmask_msi_vector(unsigned int vector)
+void unmask_msi_irq(unsigned int irq)
{
- msi_set_mask_bit(vector, 0);
- msix_flush_writes(vector);
+ msi_set_mask_bit(irq, 0);
+ msix_flush_writes(irq);
}
static struct msi_desc* alloc_msi_entry(void)
@@ -424,23 +425,23 @@ static struct msi_desc* alloc_msi_entry(void)
return entry;
}
-int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
+int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
{
struct msi_msg msg;
- msi_compose_msg(dev, desc->vector, &msg);
- set_vector_msi(desc);
- write_msi_msg(irq_desc[desc->vector].msi_desc, &msg);
+ msi_compose_msg(dev, irq, &msg);
+ set_irq_msi(msidesc);
+ write_msi_msg(irq_desc[irq].msi_desc, &msg);
return 0;
}
-void teardown_msi_vector(int vector)
+void teardown_msi_irq(int irq)
{
- unset_vector_msi(vector);
+ unset_irq_msi(irq);
}
-int msi_free_vector(struct msi_desc *entry)
+int msi_free_irq(struct msi_desc *entry)
{
if ( entry->msi_attrib.type == PCI_CAP_ID_MSIX )
{
@@ -452,19 +453,20 @@ int msi_free_vector(struct msi_desc *entry)
msix_put_fixmap(entry->dev, virt_to_fix(start));
}
list_del(&entry->list);
+ destroy_irq(entry->irq);
xfree(entry);
return 0;
}
static struct msi_desc *find_msi_entry(struct pci_dev *dev,
- int vector, int cap_id)
+ int irq, int cap_id)
{
struct msi_desc *entry;
list_for_each_entry( entry, &dev->msi_list, list )
{
if ( entry->msi_attrib.type == cap_id &&
- (vector == -1 || entry->vector == vector) )
+ (irq == -1 || entry->irq == irq) )
return entry;
}
@@ -481,7 +483,7 @@ static struct msi_desc *find_msi_entry(struct pci_dev *dev,
* of an entry zero with the new MSI irq or non-zero for otherwise.
**/
static int msi_capability_init(struct pci_dev *dev,
- int vector,
+ int irq,
struct msi_desc **desc)
{
struct msi_desc *entry;
@@ -507,7 +509,7 @@ static int msi_capability_init(struct pci_dev *dev,
entry->msi_attrib.maskbit = is_mask_bit_support(control);
entry->msi_attrib.masked = 1;
entry->msi_attrib.pos = pos;
- entry->vector = vector;
+ entry->irq = irq;
if ( is_mask_bit_support(control) )
entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
is_64bit_address(control));
@@ -594,7 +596,7 @@ static int msix_capability_init(struct pci_dev *dev,
entry->msi_attrib.maskbit = 1;
entry->msi_attrib.masked = 1;
entry->msi_attrib.pos = pos;
- entry->vector = msi->vector;
+ entry->irq = msi->irq;
entry->dev = dev;
entry->mask_base = base;
@@ -630,15 +632,15 @@ static int __pci_enable_msi(struct msi_info *msi, struct msi_desc **desc)
if ( !pdev )
return -ENODEV;
- if ( find_msi_entry(pdev, msi->vector, PCI_CAP_ID_MSI) )
+ if ( find_msi_entry(pdev, msi->irq, PCI_CAP_ID_MSI) )
{
- dprintk(XENLOG_WARNING, "vector %d has already mapped to MSI on "
- "device %02x:%02x.%01x.\n", msi->vector, msi->bus,
+ dprintk(XENLOG_WARNING, "irq %d has already mapped to MSI on "
+ "device %02x:%02x.%01x.\n", msi->irq, msi->bus,
PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn));
return 0;
}
- status = msi_capability_init(pdev, msi->vector, desc);
+ status = msi_capability_init(pdev, msi->irq, desc);
return status;
}
@@ -696,10 +698,10 @@ static int __pci_enable_msix(struct msi_info *msi, struct msi_desc **desc)
if (msi->entry_nr >= nr_entries)
return -EINVAL;
- if ( find_msi_entry(pdev, msi->vector, PCI_CAP_ID_MSIX) )
+ if ( find_msi_entry(pdev, msi->irq, PCI_CAP_ID_MSIX) )
{
- dprintk(XENLOG_WARNING, "vector %d has already mapped to MSIX on "
- "device %02x:%02x.%01x.\n", msi->vector, msi->bus,
+ dprintk(XENLOG_WARNING, "irq %d has already mapped to MSIX on "
+ "device %02x:%02x.%01x.\n", msi->irq, msi->bus,
PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn));
return 0;
}
@@ -754,21 +756,21 @@ void pci_disable_msi(struct msi_desc *msi_desc)
__pci_disable_msix(msi_desc);
}
-static void msi_free_vectors(struct pci_dev* dev)
+static void msi_free_irqs(struct pci_dev* dev)
{
struct msi_desc *entry, *tmp;
- irq_desc_t *desc;
- unsigned long flags, vector;
+ struct irq_desc *desc;
+ unsigned long flags, irq;
list_for_each_entry_safe( entry, tmp, &dev->msi_list, list )
{
- vector = entry->vector;
- desc = &irq_desc[vector];
+ irq = entry->irq;
+ desc = &irq_desc[irq];
pci_disable_msi(entry);
spin_lock_irqsave(&desc->lock, flags);
- teardown_msi_vector(vector);
+ teardown_msi_irq(irq);
if ( desc->handler == &pci_msi_type )
{
@@ -778,7 +780,7 @@ static void msi_free_vectors(struct pci_dev* dev)
}
spin_unlock_irqrestore(&desc->lock, flags);
- msi_free_vector(entry);
+ msi_free_irq(entry);
}
}
@@ -787,15 +789,15 @@ void pci_cleanup_msi(struct pci_dev *pdev)
/* Disable MSI and/or MSI-X */
msi_set_enable(pdev, 0);
msix_set_enable(pdev, 0);
- msi_free_vectors(pdev);
+ msi_free_irqs(pdev);
}
int pci_restore_msi_state(struct pci_dev *pdev)
{
unsigned long flags;
- int vector;
+ int irq;
struct msi_desc *entry, *tmp;
- irq_desc_t *desc;
+ struct irq_desc *desc;
ASSERT(spin_is_locked(&pcidevs_lock));
@@ -804,8 +806,8 @@ int pci_restore_msi_state(struct pci_dev *pdev)
list_for_each_entry_safe( entry, tmp, &pdev->msi_list, list )
{
- vector = entry->vector;
- desc = &irq_desc[vector];
+ irq = entry->irq;
+ desc = &irq_desc[irq];
spin_lock_irqsave(&desc->lock, flags);
@@ -826,7 +828,7 @@ int pci_restore_msi_state(struct pci_dev *pdev)
write_msi_msg(entry, &entry->msg);
- msi_set_mask_bit(vector, entry->msi_attrib.masked);
+ msi_set_mask_bit(irq, entry->msi_attrib.masked);
if ( entry->msi_attrib.type == PCI_CAP_ID_MSI )
msi_set_enable(pdev, 1);
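
mask_msi_irq()/unmask_msi_irq() above rely on a mask-then-flush idiom: the write to the vector-control word may be posted, so msix_flush_writes() reads a register back to force it out. A standalone sketch of the idiom; the "register" here is ordinary memory, where real code would use readl()/writel() on the mapped MSI-X table:

    #include <assert.h>
    #include <stdint.h>

    static volatile uint32_t vector_ctrl;  /* stand-in for one MSI-X entry */

    static void msi_set_mask_bit(int flag)
    {
        uint32_t v = vector_ctrl & ~1u;

        vector_ctrl = v | (flag ? 1u : 0u); /* possibly-posted write */
    }

    static void msix_flush_writes(void)
    {
        (void)vector_ctrl;                  /* read-back flushes the write */
    }

    static void mask_msi_irq(void)   { msi_set_mask_bit(1); msix_flush_writes(); }
    static void unmask_msi_irq(void) { msi_set_mask_bit(0); msix_flush_writes(); }

    int main(void)
    {
        mask_msi_irq();
        assert(vector_ctrl & 1);
        unmask_msi_irq();
        assert(!(vector_ctrl & 1));
        return 0;
    }
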
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index d6e00c8fa4..bfdc0dc071 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -30,7 +30,7 @@ ioapic_guest_write(
static int physdev_map_pirq(struct physdev_map_pirq *map)
{
struct domain *d;
- int vector, pirq, ret = 0;
+ int pirq, irq, ret = 0;
struct msi_info _msi;
void *map_data = NULL;
@@ -51,7 +51,7 @@ static int physdev_map_pirq(struct physdev_map_pirq *map)
goto free_domain;
}
- /* Verify or get vector. */
+ /* Verify or get irq. */
switch ( map->type )
{
case MAP_PIRQ_TYPE_GSI:
@@ -62,25 +62,25 @@ static int physdev_map_pirq(struct physdev_map_pirq *map)
ret = -EINVAL;
goto free_domain;
}
- vector = domain_irq_to_vector(current->domain, map->index);
- if ( !vector )
+ irq = domain_pirq_to_irq(current->domain, map->index);
+ if ( !irq )
{
- dprintk(XENLOG_G_ERR, "dom%d: map irq with no vector %d\n",
- d->domain_id, vector);
+ dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
+ d->domain_id);
ret = -EINVAL;
goto free_domain;
}
break;
case MAP_PIRQ_TYPE_MSI:
- vector = map->index;
- if ( vector == -1 )
- vector = assign_irq_vector(AUTO_ASSIGN_IRQ);
+ irq = map->index;
+ if ( irq == -1 )
+ irq = create_irq();
- if ( vector < 0 || vector >= NR_VECTORS )
+ if ( irq < 0 || irq >= nr_irqs )
{
- dprintk(XENLOG_G_ERR, "dom%d: map irq with wrong vector %d\n",
- d->domain_id, vector);
+ dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
+ d->domain_id);
ret = -EINVAL;
goto free_domain;
}
@@ -89,7 +89,7 @@ static int physdev_map_pirq(struct physdev_map_pirq *map)
_msi.devfn = map->devfn;
_msi.entry_nr = map->entry_nr;
_msi.table_base = map->table_base;
- _msi.vector = vector;
+ _msi.irq = irq;
map_data = &_msi;
break;
@@ -103,7 +103,7 @@ static int physdev_map_pirq(struct physdev_map_pirq *map)
spin_lock(&pcidevs_lock);
/* Verify or get pirq. */
spin_lock(&d->event_lock);
- pirq = domain_vector_to_irq(d, vector);
+ pirq = domain_irq_to_pirq(d, irq);
if ( map->pirq < 0 )
{
if ( pirq )
@@ -132,7 +132,7 @@ static int physdev_map_pirq(struct physdev_map_pirq *map)
{
if ( pirq && pirq != map->pirq )
{
- dprintk(XENLOG_G_ERR, "dom%d: vector %d conflicts with irq %d\n",
+ dprintk(XENLOG_G_ERR, "dom%d: pirq %d conflicts with irq %d\n",
d->domain_id, map->index, map->pirq);
ret = -EEXIST;
goto done;
@@ -141,7 +141,7 @@ static int physdev_map_pirq(struct physdev_map_pirq *map)
pirq = map->pirq;
}
- ret = map_domain_pirq(d, pirq, vector, map->type, map_data);
+ ret = map_domain_pirq(d, pirq, irq, map->type, map_data);
if ( ret == 0 )
map->pirq = pirq;
@@ -149,7 +149,7 @@ done:
spin_unlock(&d->event_lock);
spin_unlock(&pcidevs_lock);
if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
- free_irq_vector(vector);
+ destroy_irq(irq);
free_domain:
rcu_unlock_domain(d);
return ret;
@@ -344,14 +344,14 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
irq = irq_op.irq;
ret = -EINVAL;
if ( (irq < 0) || (irq >= nr_irqs_gsi) )
break;
irq_op.vector = assign_irq_vector(irq);
spin_lock(&pcidevs_lock);
spin_lock(&dom0->event_lock);
- ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector,
+ ret = map_domain_pirq(dom0, irq_op.irq, irq,
MAP_PIRQ_TYPE_GSI, NULL);
spin_unlock(&dom0->event_lock);
spin_unlock(&pcidevs_lock);
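
The MAP_PIRQ_TYPE_MSI path creates an irq on demand (map->index == -1) and must destroy it again if the mapping fails, but only when it created the irq itself. A compilable sketch of that ownership rule, with create_irq()/destroy_irq() modelled by a free bitmap and the error values illustrative:

    #include <assert.h>

    #define NR_IRQS     32
    #define NR_IRQS_GSI 16

    static unsigned char irq_used[NR_IRQS];

    static int create_irq(void)
    {
        for ( int i = NR_IRQS_GSI; i < NR_IRQS; i++ )
            if ( !irq_used[i] )
            {
                irq_used[i] = 1;
                return i;
            }
        return -1;
    }

    static void destroy_irq(int irq) { irq_used[irq] = 0; }

    static int map_pirq_msi(int index, int (*map_domain_pirq)(int irq))
    {
        int irq = (index == -1) ? create_irq() : index;
        int ret;

        if ( irq < 0 || irq >= NR_IRQS )
            return -22;                   /* -EINVAL */
        ret = map_domain_pirq(irq);
        if ( ret != 0 && index == -1 )
            destroy_irq(irq);             /* undo only what we created */
        return ret;
    }

    static int failing_map(int irq) { (void)irq; return -28; /* -ENOSPC */ }

    int main(void)
    {
        assert(map_pirq_msi(-1, failing_map) == -28);
        assert(!irq_used[NR_IRQS_GSI]);   /* the created irq was released */
        return 0;
    }
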
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 21cdc50802..ea380930a6 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -922,7 +922,7 @@ void __init __start_xen(unsigned long mbi_p)
init_apic_mappings();
init_IRQ();
-
+
percpu_init_areas();
xsm_init(&initrdidx, mbi, initial_images_start);
diff --git a/xen/drivers/passthrough/amd/iommu_init.c b/xen/drivers/passthrough/amd/iommu_init.c
index cc1e81c132..2cc108b50a 100644
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -27,7 +27,7 @@
#include <asm/hvm/svm/amd-iommu-proto.h>
#include <asm-x86/fixmap.h>
-static struct amd_iommu *vector_to_iommu[NR_VECTORS];
+static struct amd_iommu **irq_to_iommu;
static int nr_amd_iommus;
static long amd_iommu_cmd_buffer_entries = IOMMU_CMD_BUFFER_DEFAULT_ENTRIES;
static long amd_iommu_event_log_entries = IOMMU_EVENT_LOG_DEFAULT_ENTRIES;
@@ -309,7 +309,7 @@ static void amd_iommu_msi_data_init(struct amd_iommu *iommu)
u8 bus = (iommu->bdf >> 8) & 0xff;
u8 dev = PCI_SLOT(iommu->bdf & 0xff);
u8 func = PCI_FUNC(iommu->bdf & 0xff);
- int vector = iommu->vector;
+ int vector = irq_to_vector(iommu->irq);
msi_data = MSI_DATA_TRIGGER_EDGE |
MSI_DATA_LEVEL_ASSERT |
@@ -355,10 +355,10 @@ static void amd_iommu_msi_enable(struct amd_iommu *iommu, int flag)
iommu->msi_cap + PCI_MSI_FLAGS, control);
}
-static void iommu_msi_unmask(unsigned int vector)
+static void iommu_msi_unmask(unsigned int irq)
{
unsigned long flags;
- struct amd_iommu *iommu = vector_to_iommu[vector];
+ struct amd_iommu *iommu = irq_to_iommu[irq];
/* FIXME: do not support mask bits at the moment */
if ( iommu->maskbit )
@@ -369,10 +369,10 @@ static void iommu_msi_unmask(unsigned int vector)
spin_unlock_irqrestore(&iommu->lock, flags);
}
-static void iommu_msi_mask(unsigned int vector)
+static void iommu_msi_mask(unsigned int irq)
{
unsigned long flags;
- struct amd_iommu *iommu = vector_to_iommu[vector];
+ struct amd_iommu *iommu = irq_to_iommu[irq];
/* FIXME: do not support mask bits at the moment */
if ( iommu->maskbit )
@@ -383,21 +383,21 @@ static void iommu_msi_mask(unsigned int vector)
spin_unlock_irqrestore(&iommu->lock, flags);
}
-static unsigned int iommu_msi_startup(unsigned int vector)
+static unsigned int iommu_msi_startup(unsigned int irq)
{
- iommu_msi_unmask(vector);
+ iommu_msi_unmask(irq);
return 0;
}
-static void iommu_msi_end(unsigned int vector)
+static void iommu_msi_end(unsigned int irq)
{
- iommu_msi_unmask(vector);
+ iommu_msi_unmask(irq);
ack_APIC_irq();
}
-static void iommu_msi_set_affinity(unsigned int vector, cpumask_t dest)
+static void iommu_msi_set_affinity(unsigned int irq, cpumask_t dest)
{
- struct amd_iommu *iommu = vector_to_iommu[vector];
+ struct amd_iommu *iommu = irq_to_iommu[irq];
amd_iommu_msi_addr_init(iommu, cpu_physical_id(first_cpu(dest)));
}
@@ -451,7 +451,7 @@ static void parse_event_log_entry(u32 entry[])
}
}
-static void amd_iommu_page_fault(int vector, void *dev_id,
+static void amd_iommu_page_fault(int irq, void *dev_id,
struct cpu_user_regs *regs)
{
u32 event[4];
@@ -477,32 +477,30 @@ static void amd_iommu_page_fault(int vector, void *dev_id,
static int set_iommu_interrupt_handler(struct amd_iommu *iommu)
{
- int vector, ret;
+ int irq, ret;
- vector = assign_irq_vector(AUTO_ASSIGN_IRQ);
- if ( vector <= 0 )
+ irq = create_irq();
+ if ( irq <= 0 )
{
- gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: no vectors\n");
+ gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: no irqs\n");
return 0;
}
- irq_desc[vector].handler = &iommu_msi_type;
- vector_to_iommu[vector] = iommu;
- ret = request_irq_vector(vector, amd_iommu_page_fault, 0,
+ irq_desc[irq].handler = &iommu_msi_type;
+ irq_to_iommu[irq] = iommu;
+ ret = request_irq(irq, amd_iommu_page_fault, 0,
"amd_iommu", iommu);
if ( ret )
{
- irq_desc[vector].handler = &no_irq_type;
- vector_to_iommu[vector] = NULL;
- free_irq_vector(vector);
+ irq_desc[irq].handler = &no_irq_type;
+ irq_to_iommu[irq] = NULL;
+ destroy_irq(irq);
amd_iov_error("can't request irq\n");
return 0;
}
- /* Make sure that vector is never re-used. */
- vector_irq[vector] = NEVER_ASSIGN_IRQ;
- iommu->vector = vector;
- return vector;
+ iommu->irq = irq;
+ return irq;
}
void enable_iommu(struct amd_iommu *iommu)
@@ -511,6 +509,13 @@ void enable_iommu(struct amd_iommu *iommu)
spin_lock_irqsave(&iommu->lock, flags);
+ if ( !irq_to_iommu )
+ {
+ irq_to_iommu = xmalloc_array(struct amd_iommu *, nr_irqs);
+ BUG_ON(!irq_to_iommu);
+ memset(irq_to_iommu, 0, nr_irqs * sizeof(*irq_to_iommu));
+ }
+
if ( iommu->enabled )
{
spin_unlock_irqrestore(&iommu->lock, flags);
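
Both IOMMU drivers now share the same setup shape: reserve an irq with create_irq(), publish the handler and the irq->iommu back-pointer, then unwind all three if request_irq() fails. A standalone model of that ordering; the types and the request_irq() outcome are stand-ins, not the Xen API:

    #include <assert.h>
    #include <stddef.h>

    struct amd_iommu { int irq; };

    static void *handler[32];
    static struct amd_iommu *irq_to_iommu[32];
    static int no_irq_type, iommu_msi_type;  /* dummy handler "objects" */

    static int create_irq(void) { return 17; }        /* fixed for the demo */
    static void destroy_irq(int irq) { (void)irq; }
    static int request_irq(int irq, int fail) { (void)irq; return fail ? -1 : 0; }

    static int set_iommu_interrupt_handler(struct amd_iommu *iommu, int fail)
    {
        int irq = create_irq();

        if ( irq <= 0 )
            return 0;
        handler[irq] = &iommu_msi_type;
        irq_to_iommu[irq] = iommu;
        if ( request_irq(irq, fail) )
        {
            handler[irq] = &no_irq_type;  /* unwind in reverse order */
            irq_to_iommu[irq] = NULL;
            destroy_irq(irq);
            return 0;
        }
        iommu->irq = irq;
        return irq;
    }

    int main(void)
    {
        struct amd_iommu iommu = { -1 };

        assert(set_iommu_interrupt_handler(&iommu, 1) == 0);
        assert(irq_to_iommu[17] == NULL);
        assert(set_iommu_interrupt_handler(&iommu, 0) == 17);
        return 0;
    }
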
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index 97de764536..79a0c9e53a 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -35,7 +35,6 @@ static void pt_irq_time_out(void *data)
{
struct hvm_mirq_dpci_mapping *irq_map = data;
unsigned int guest_gsi, machine_gsi = 0;
- int vector;
struct hvm_irq_dpci *dpci = NULL;
struct dev_intx_gsi_link *digl;
struct hvm_girq_dpci_mapping *girq;
@@ -68,7 +67,6 @@ static void pt_irq_time_out(void *data)
machine_gsi + 1) )
{
clear_bit(machine_gsi, dpci->dirq_mask);
- vector = domain_irq_to_vector(irq_map->dom, machine_gsi);
dpci->mirq[machine_gsi].pending = 0;
}
@@ -88,6 +86,7 @@ void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci)
xfree(dpci->mirq);
xfree(dpci->dirq_mask);
xfree(dpci->mapping);
+ xfree(dpci->hvm_timer);
xfree(dpci);
}
@@ -124,9 +123,11 @@ int pt_irq_create_bind_vtd(
BITS_TO_LONGS(d->nr_pirqs));
hvm_irq_dpci->mapping = xmalloc_array(unsigned long,
BITS_TO_LONGS(d->nr_pirqs));
+ hvm_irq_dpci->hvm_timer = xmalloc_array(struct timer, nr_irqs);
if ( !hvm_irq_dpci->mirq ||
!hvm_irq_dpci->dirq_mask ||
- !hvm_irq_dpci->mapping )
+ !hvm_irq_dpci->mapping ||
+ !hvm_irq_dpci->hvm_timer )
{
spin_unlock(&d->event_lock);
free_hvm_irq_dpci(hvm_irq_dpci);
@@ -136,6 +137,8 @@ int pt_irq_create_bind_vtd(
d->nr_pirqs * sizeof(*hvm_irq_dpci->mirq));
bitmap_zero(hvm_irq_dpci->dirq_mask, d->nr_pirqs);
bitmap_zero(hvm_irq_dpci->mapping, d->nr_pirqs);
+ memset(hvm_irq_dpci->hvm_timer, 0,
+ nr_irqs * sizeof(*hvm_irq_dpci->hvm_timer));
for ( int i = 0; i < d->nr_pirqs; i++ )
INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
for ( int i = 0; i < NR_HVM_IRQS; i++ )
@@ -236,7 +239,7 @@ int pt_irq_create_bind_vtd(
/* Bind the same mirq once in the same domain */
if ( !test_and_set_bit(machine_gsi, hvm_irq_dpci->mapping))
{
- unsigned int vector = domain_irq_to_vector(d, machine_gsi);
+ unsigned int irq = domain_pirq_to_irq(d, machine_gsi);
unsigned int share;
hvm_irq_dpci->mirq[machine_gsi].dom = d;
@@ -256,14 +259,14 @@ int pt_irq_create_bind_vtd(
/* Init timer before binding */
if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
- init_timer(&hvm_irq_dpci->hvm_timer[vector],
+ init_timer(&hvm_irq_dpci->hvm_timer[irq],
pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
/* Deal with gsi for legacy devices */
rc = pirq_guest_bind(d->vcpu[0], machine_gsi, share);
if ( unlikely(rc) )
{
if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
- kill_timer(&hvm_irq_dpci->hvm_timer[vector]);
+ kill_timer(&hvm_irq_dpci->hvm_timer[irq]);
hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
clear_bit(machine_gsi, hvm_irq_dpci->mapping);
list_del(&girq->list);
@@ -349,7 +352,7 @@ int pt_irq_destroy_bind_vtd(
pirq_guest_unbind(d, machine_gsi);
msixtbl_pt_unregister(d, machine_gsi);
if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
- kill_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)]);
+ kill_timer(&hvm_irq_dpci->hvm_timer[domain_pirq_to_irq(d, machine_gsi)]);
hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
hvm_irq_dpci->mirq[machine_gsi].flags = 0;
clear_bit(machine_gsi, hvm_irq_dpci->mapping);
@@ -357,7 +360,7 @@ int pt_irq_destroy_bind_vtd(
}
spin_unlock(&d->event_lock);
gdprintk(XENLOG_INFO,
- "XEN_DOMCTL_irq_unmapping: m_irq = %x device = %x intx = %x\n",
+ "XEN_DOMCTL_irq_unmapping: m_irq = 0x%x device = 0x%x intx = 0x%x\n",
machine_gsi, device, intx);
return 0;
@@ -367,7 +370,7 @@ int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
{
struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
- ASSERT(spin_is_locked(&irq_desc[domain_irq_to_vector(d, mirq)].lock));
+ ASSERT(spin_is_locked(&irq_desc[domain_pirq_to_irq(d, mirq)].lock));
if ( !iommu_enabled || (d == dom0) || !dpci ||
!test_bit(mirq, dpci->mapping))
return 0;
@@ -425,7 +428,7 @@ static int hvm_pci_msi_assert(struct domain *d, int pirq)
static void hvm_dirq_assist(unsigned long _d)
{
- unsigned int irq;
+ unsigned int pirq;
uint32_t device, intx;
struct domain *d = (struct domain *)_d;
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
@@ -433,34 +436,34 @@ static void hvm_dirq_assist(unsigned long _d)
ASSERT(hvm_irq_dpci);
- for ( irq = find_first_bit(hvm_irq_dpci->dirq_mask, d->nr_pirqs);
- irq < d->nr_pirqs;
- irq = find_next_bit(hvm_irq_dpci->dirq_mask, d->nr_pirqs, irq + 1) )
+ for ( pirq = find_first_bit(hvm_irq_dpci->dirq_mask, d->nr_pirqs);
+ pirq < d->nr_pirqs;
+ pirq = find_next_bit(hvm_irq_dpci->dirq_mask, d->nr_pirqs, pirq + 1) )
{
- if ( !test_and_clear_bit(irq, hvm_irq_dpci->dirq_mask) )
+ if ( !test_and_clear_bit(pirq, hvm_irq_dpci->dirq_mask) )
continue;
spin_lock(&d->event_lock);
#ifdef SUPPORT_MSI_REMAPPING
- if ( hvm_irq_dpci->mirq[irq].flags & HVM_IRQ_DPCI_GUEST_MSI )
+ if ( hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_GUEST_MSI )
{
- hvm_pci_msi_assert(d, irq);
+ hvm_pci_msi_assert(d, pirq);
spin_unlock(&d->event_lock);
continue;
}
#endif
- list_for_each_entry ( digl, &hvm_irq_dpci->mirq[irq].digl_list, list )
+ list_for_each_entry ( digl, &hvm_irq_dpci->mirq[pirq].digl_list, list )
{
device = digl->device;
intx = digl->intx;
hvm_pci_intx_assert(d, device, intx);
- hvm_irq_dpci->mirq[irq].pending++;
+ hvm_irq_dpci->mirq[pirq].pending++;
#ifdef SUPPORT_MSI_REMAPPING
- if ( hvm_irq_dpci->mirq[irq].flags & HVM_IRQ_DPCI_TRANSLATE )
+ if ( hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_TRANSLATE )
{
/* for translated MSI to INTx interrupt, eoi as early as possible */
- __msi_pirq_eoi(d, irq);
+ __msi_pirq_eoi(d, pirq);
}
#endif
}
@@ -472,8 +475,8 @@ static void hvm_dirq_assist(unsigned long _d)
* guest will never deal with the irq, then the physical interrupt line
* will never be deasserted.
*/
- if ( pt_irq_need_timer(hvm_irq_dpci->mirq[irq].flags) )
- set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
+ if ( pt_irq_need_timer(hvm_irq_dpci->mirq[pirq].flags) )
+ set_timer(&hvm_irq_dpci->hvm_timer[domain_pirq_to_irq(d, pirq)],
NOW() + PT_IRQ_TIME_OUT);
spin_unlock(&d->event_lock);
}
@@ -501,7 +504,7 @@ static void __hvm_dpci_eoi(struct domain *d,
! pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
return;
- stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)]);
+ stop_timer(&hvm_irq_dpci->hvm_timer[domain_pirq_to_irq(d, machine_gsi)]);
pirq_guest_eoi(d, machine_gsi);
}
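
The dpci timers are now sized by nr_irqs and always indexed through domain_pirq_to_irq(), so one machine irq keeps exactly one timer no matter which guest pirq it entered through. A reduced model of that indexing, with struct domain cut down to its pirq->irq table and the sizes arbitrary:

    #include <assert.h>
    #include <stdlib.h>

    struct domain { int *pirq_irq; };
    struct timer  { int armed; };

    #define domain_pirq_to_irq(d, pirq) ((d)->pirq_irq[(pirq)])

    int main(void)
    {
        int map[8] = { 0 };
        struct domain d = { map };
        unsigned int nr_irqs = 48;
        struct timer *hvm_timer = calloc(nr_irqs, sizeof(*hvm_timer));

        if ( !hvm_timer )
            return 1;
        map[3] = 40;                                    /* pirq 3 -> irq 40 */
        hvm_timer[domain_pirq_to_irq(&d, 3)].armed = 1; /* set_timer() analogue */
        assert(hvm_timer[40].armed);
        free(hvm_timer);
        return 0;
    }
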
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 3da1103642..bf47ae97b8 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -216,7 +216,7 @@ static void pci_clean_dpci_irqs(struct domain *d)
i = find_next_bit(hvm_irq_dpci->mapping, d->nr_pirqs, i + 1) )
{
pirq_guest_unbind(d, i);
- kill_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, i)]);
+ kill_timer(&hvm_irq_dpci->hvm_timer[domain_pirq_to_irq(d, i)]);
list_for_each_safe ( digl_list, tmp,
&hvm_irq_dpci->mirq[i].digl_list )
@@ -408,7 +408,7 @@ static void dump_pci_devices(unsigned char ch)
pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
pdev->domain ? pdev->domain->domain_id : -1);
list_for_each_entry ( msi, &pdev->msi_list, list )
- printk("%d ", msi->vector);
+ printk("%d ", msi->irq);
printk(">\n");
}
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 83cb073b21..33ecdc0162 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -31,6 +31,8 @@
#include <xen/pci_regs.h>
#include <xen/keyhandler.h>
#include <asm/msi.h>
+#include <asm/irq.h>
+#include <mach_apic.h>
#include "iommu.h"
#include "dmar.h"
#include "extern.h"
@@ -659,7 +661,7 @@ static void iommu_disable_translation(struct iommu *iommu)
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
-static struct iommu *vector_to_iommu[NR_VECTORS];
+static struct iommu **irq_to_iommu;
static int iommu_page_fault_do_one(struct iommu *iommu, int type,
u8 fault_reason, u16 source_id, u64 addr)
{
@@ -705,7 +707,7 @@ static void iommu_fault_status(u32 fault_status)
}
#define PRIMARY_FAULT_REG_LEN (16)
-static void iommu_page_fault(int vector, void *dev_id,
+static void iommu_page_fault(int irq, void *dev_id,
struct cpu_user_regs *regs)
{
struct iommu *iommu = dev_id;
@@ -777,9 +779,9 @@ clear_overflow:
}
}
-static void dma_msi_unmask(unsigned int vector)
+static void dma_msi_unmask(unsigned int irq)
{
- struct iommu *iommu = vector_to_iommu[vector];
+ struct iommu *iommu = irq_to_iommu[irq];
unsigned long flags;
/* unmask it */
@@ -788,10 +790,10 @@ static void dma_msi_unmask(unsigned int vector)
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
-static void dma_msi_mask(unsigned int vector)
+static void dma_msi_mask(unsigned int irq)
{
unsigned long flags;
- struct iommu *iommu = vector_to_iommu[vector];
+ struct iommu *iommu = irq_to_iommu[irq];
/* mask it */
spin_lock_irqsave(&iommu->register_lock, flags);
@@ -799,22 +801,23 @@ static void dma_msi_mask(unsigned int vector)
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
-static unsigned int dma_msi_startup(unsigned int vector)
+static unsigned int dma_msi_startup(unsigned int irq)
{
- dma_msi_unmask(vector);
+ dma_msi_unmask(irq);
return 0;
}
-static void dma_msi_end(unsigned int vector)
+static void dma_msi_end(unsigned int irq)
{
- dma_msi_unmask(vector);
+ dma_msi_unmask(irq);
ack_APIC_irq();
}
-static void dma_msi_data_init(struct iommu *iommu, int vector)
+static void dma_msi_data_init(struct iommu *iommu, int irq)
{
u32 msi_data = 0;
unsigned long flags;
+ int vector = irq_to_vector(irq);
/* Fixed, edge, assert mode. Follow MSI setting */
msi_data |= vector & 0xff;
@@ -842,9 +845,9 @@ static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu)
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
-static void dma_msi_set_affinity(unsigned int vector, cpumask_t dest)
+static void dma_msi_set_affinity(unsigned int irq, cpumask_t dest)
{
- struct iommu *iommu = vector_to_iommu[vector];
+ struct iommu *iommu = irq_to_iommu[irq];
dma_msi_addr_init(iommu, cpu_physical_id(first_cpu(dest)));
}
@@ -861,31 +864,28 @@ static struct hw_interrupt_type dma_msi_type = {
static int iommu_set_interrupt(struct iommu *iommu)
{
- int vector, ret;
+ int irq, ret;
- vector = assign_irq_vector(AUTO_ASSIGN_IRQ);
- if ( vector <= 0 )
+ irq = create_irq();
+ if ( irq <= 0 )
{
- gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: no vectors\n");
+ gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: no irq available!\n");
return -EINVAL;
}
- irq_desc[vector].handler = &dma_msi_type;
- vector_to_iommu[vector] = iommu;
- ret = request_irq_vector(vector, iommu_page_fault, 0, "dmar", iommu);
+ irq_desc[irq].handler = &dma_msi_type;
+ irq_to_iommu[irq] = iommu;
+ ret = request_irq(irq, iommu_page_fault, 0, "dmar", iommu);
if ( ret )
{
- irq_desc[vector].handler = &no_irq_type;
- vector_to_iommu[vector] = NULL;
- free_irq_vector(vector);
+ irq_desc[irq].handler = &no_irq_type;
+ irq_to_iommu[irq] = NULL;
+ destroy_irq(irq);
gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: can't request irq\n");
return ret;
}
- /* Make sure that vector is never re-used. */
- vector_irq[vector] = NEVER_ASSIGN_IRQ;
-
- return vector;
+ return irq;
}
static int iommu_alloc(struct acpi_drhd_unit *drhd)
@@ -906,7 +906,7 @@ static int iommu_alloc(struct acpi_drhd_unit *drhd)
return -ENOMEM;
memset(iommu, 0, sizeof(struct iommu));
- iommu->vector = -1; /* No vector assigned yet. */
+ iommu->irq = -1; /* No irq assigned yet. */
iommu->intel = alloc_intel_iommu();
if ( iommu->intel == NULL )
@@ -966,7 +966,7 @@ static void iommu_free(struct acpi_drhd_unit *drhd)
iounmap(iommu->reg);
free_intel_iommu(iommu->intel);
- release_irq_vector(iommu->vector);
+ destroy_irq(iommu->irq);
xfree(iommu);
drhd->iommu = NULL;
@@ -1581,24 +1581,24 @@ static int init_vtd_hw(void)
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
struct iommu_flush *flush = NULL;
- int vector;
+ int irq = -1;
int ret;
unsigned long flags;
for_each_drhd_unit ( drhd )
{
iommu = drhd->iommu;
- if ( iommu->vector < 0 )
+ if ( iommu->irq < 0 )
{
- vector = iommu_set_interrupt(iommu);
- if ( vector < 0 )
+ irq = iommu_set_interrupt(iommu);
+ if ( irq < 0 )
{
gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: interrupt setup failed\n");
- return vector;
+ return irq;
}
- iommu->vector = vector;
+ iommu->irq = irq;
}
- dma_msi_data_init(iommu, iommu->vector);
+ dma_msi_data_init(iommu, iommu->irq);
dma_msi_addr_init(iommu, cpu_physical_id(first_cpu(cpu_online_map)));
clear_fault_bits(iommu);
@@ -1703,6 +1703,11 @@ int intel_vtd_setup(void)
spin_lock_init(&domid_bitmap_lock);
clflush_size = get_cache_line_size();
+ irq_to_iommu = xmalloc_array(struct iommu *, nr_irqs);
+ if ( !irq_to_iommu )
+ return -ENOMEM;
+ memset(irq_to_iommu, 0, nr_irqs * sizeof(*irq_to_iommu));
+
/* We enable the following features only if they are supported by all VT-d
* engines: Snoop Control, DMA passthrough, Queued Invalidation and
* Interrupt Remapping.
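
The reverse map set up in intel_vtd_setup() follows an allocate/check/zero shape: the allocation is validated before the memory is touched, and the table is only ever created once. The same shape in hosted C, with xmalloc_array() approximated by calloc() (which zeroes, making the memset implicit):

    #include <stdlib.h>

    struct iommu;
    static struct iommu **irq_to_iommu;

    static int alloc_irq_to_iommu(unsigned int nr_irqs)
    {
        if ( irq_to_iommu )          /* allocate the table exactly once */
            return 0;
        irq_to_iommu = calloc(nr_irqs, sizeof(*irq_to_iommu));
        return irq_to_iommu ? 0 : -12;   /* -ENOMEM */
    }

    int main(void)
    {
        return alloc_irq_to_iommu(256) ? 1 : 0;
    }
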
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index 9316cef96f..3617508ff5 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -121,7 +121,7 @@ void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
hvm_pci_intx_deassert(d, digl->device, digl->intx);
if ( --dpci->mirq[i].pending == 0 )
{
- stop_timer(&dpci->hvm_timer[domain_irq_to_vector(d, i)]);
+ stop_timer(&dpci->hvm_timer[domain_pirq_to_irq(d, i)]);
pirq_guest_eoi(d, i);
}
}
diff --git a/xen/include/asm-x86/amd-iommu.h b/xen/include/asm-x86/amd-iommu.h
index cdc99dd6e8..c2811f9ab7 100644
--- a/xen/include/asm-x86/amd-iommu.h
+++ b/xen/include/asm-x86/amd-iommu.h
@@ -79,7 +79,7 @@ struct amd_iommu {
int maskbit;
int enabled;
- int vector;
+ int irq;
};
struct ivrs_mappings {
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 92c9a9d375..8e6f0aba57 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -262,9 +262,9 @@ struct arch_domain
/* Shadow translated domain: P2M mapping */
pagetable_t phys_table;
- /* NB. protected by d->event_lock and by irq_desc[vector].lock */
- int vector_pirq[NR_VECTORS];
- s16 *pirq_vector;
+ /* NB. protected by d->event_lock and by irq_desc[irq].lock */
+ int *irq_pirq;
+ int *pirq_irq;
/* Shared page for notifying that explicit PIRQ EOI is required. */
unsigned long *pirq_eoi_map;
diff --git a/xen/include/asm-x86/irq.h b/xen/include/asm-x86/irq.h
index 7963c2add3..5e864b1c32 100644
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -7,19 +7,25 @@
#include <asm/atomic.h>
#include <irq_vectors.h>
-#define IO_APIC_IRQ(irq) (((irq) >= 16) || ((1<<(irq)) & io_apic_irqs))
+#define IO_APIC_IRQ(irq) (((irq) >= 16 && (irq) < nr_irqs_gsi) \
+ || (((irq) < 16) && ((1 << (irq)) & io_apic_irqs)))
#define IO_APIC_VECTOR(irq) (irq_vector[irq])
+#define MSI_IRQ(irq) ((irq) >= nr_irqs_gsi && (irq) < nr_irqs)
+
#define LEGACY_VECTOR(irq) ((irq) + FIRST_LEGACY_VECTOR)
#define LEGACY_IRQ_FROM_VECTOR(vec) ((vec) - FIRST_LEGACY_VECTOR)
-#define irq_to_vector(irq) \
- (IO_APIC_IRQ(irq) ? IO_APIC_VECTOR(irq) : LEGACY_VECTOR(irq))
#define vector_to_irq(vec) (vector_irq[vec])
+#define irq_to_desc(irq) (&irq_desc[(irq)])
+
+#define MAX_GSI_IRQS (PAGE_SIZE * 8)
+#define MAX_NR_IRQS (2 * MAX_GSI_IRQS)
extern int vector_irq[NR_VECTORS];
extern u8 *irq_vector;
+extern int irq_to_vector(int irq);
#define platform_legacy_irq(irq) ((irq) < 16)
fastcall void event_check_interrupt(void);
@@ -51,17 +57,21 @@ extern atomic_t irq_mis_count;
int pirq_shared(struct domain *d , int irq);
-int map_domain_pirq(struct domain *d, int pirq, int vector, int type,
+int map_domain_pirq(struct domain *d, int pirq, int irq, int type,
void *data);
int unmap_domain_pirq(struct domain *d, int pirq);
int get_free_pirq(struct domain *d, int type, int index);
void free_domain_pirqs(struct domain *d);
-#define domain_irq_to_vector(d, irq) ((d)->arch.pirq_vector[irq] ?: \
- IO_APIC_IRQ(irq) ? 0 : LEGACY_VECTOR(irq))
-#define domain_vector_to_irq(d, vec) ((d)->arch.vector_pirq[vec] ?: \
- ((vec) < FIRST_LEGACY_VECTOR || \
- (vec) > LAST_LEGACY_VECTOR) ? \
- 0 : LEGACY_IRQ_FROM_VECTOR(vec))
+int init_irq_data(void);
+
+void clear_irq_vector(int irq);
+int __assign_irq_vector(int irq);
+
+int create_irq(void);
+void destroy_irq(unsigned int irq);
+
+#define domain_pirq_to_irq(d, pirq) ((d)->arch.pirq_irq[pirq])
+#define domain_irq_to_pirq(d, irq) ((d)->arch.irq_pirq[irq])
#endif /* _ASM_HW_IRQ_H */
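
The macros above split the new irq number space into three bands: [0, 16) legacy PIC lines, [16, nr_irqs_gsi) IO-APIC GSIs, and [nr_irqs_gsi, nr_irqs) dynamically created MSI irqs. A compilable illustration with made-up sizes (real values come from ACPI table parsing):

    #include <stdio.h>

    static unsigned int nr_irqs_gsi = 24, nr_irqs = 64;
    static unsigned int io_apic_irqs = 0xfffa; /* lines routed via IO-APIC */

    #define IO_APIC_IRQ(irq) (((irq) >= 16 && (irq) < nr_irqs_gsi) \
                              || (((irq) < 16) && ((1 << (irq)) & io_apic_irqs)))
    #define MSI_IRQ(irq)     ((irq) >= nr_irqs_gsi && (irq) < nr_irqs)

    int main(void)
    {
        unsigned int irq;

        for ( irq = 0; irq < nr_irqs; irq += 8 )
            printf("irq %2u: io_apic=%d msi=%d\n", irq,
                   !!IO_APIC_IRQ(irq), !!MSI_IRQ(irq));
        return 0;
    }
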
diff --git a/xen/include/asm-x86/mach-default/irq_vectors.h b/xen/include/asm-x86/mach-default/irq_vectors.h
index 30c3f89daf..7ca3312def 100644
--- a/xen/include/asm-x86/mach-default/irq_vectors.h
+++ b/xen/include/asm-x86/mach-default/irq_vectors.h
@@ -23,6 +23,7 @@
#define LAST_LEGACY_VECTOR 0xef
#define HYPERCALL_VECTOR 0x82
+#define LEGACY_SYSCALL_VECTOR 0x80
/* Dynamically-allocated vectors available to any driver. */
#define FIRST_DYNAMIC_VECTOR 0x20
diff --git a/xen/include/asm-x86/msi.h b/xen/include/asm-x86/msi.h
index 56387c6004..dc9877b0a2 100644
--- a/xen/include/asm-x86/msi.h
+++ b/xen/include/asm-x86/msi.h
@@ -2,7 +2,6 @@
#define __ASM_MSI_H
#include <xen/cpumask.h>
-#include <asm/irq.h>
/*
* Constants for Intel APIC based MSI messages.
*/
@@ -57,7 +56,7 @@
struct msi_info {
int bus;
int devfn;
- int vector;
+ int irq;
int entry_nr;
uint64_t table_base;
};
@@ -70,14 +69,13 @@ struct msi_msg {
struct msi_desc;
/* Helper functions */
-extern void mask_msi_vector(unsigned int vector);
-extern void unmask_msi_vector(unsigned int vector);
+extern void mask_msi_irq(unsigned int irq);
+extern void unmask_msi_irq(unsigned int irq);
-extern void set_msi_affinity(unsigned int vector, cpumask_t mask);
+extern void set_msi_affinity(unsigned int irq, cpumask_t mask);
extern int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc);
extern void pci_disable_msi(struct msi_desc *desc);
extern void pci_cleanup_msi(struct pci_dev *pdev);
-extern int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
-extern void teardown_msi_vector(int vector);
+extern int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq);
+extern void teardown_msi_irq(int irq);
-extern int msi_free_vector(struct msi_desc *entry);
extern int pci_restore_msi_state(struct pci_dev *pdev);
@@ -97,7 +96,7 @@ struct msi_desc {
void __iomem *mask_base; /* va for the entry in mask table */
struct pci_dev *dev;
- int vector;
+ int irq;
struct msi_msg msg; /* Last set MSI message */
@@ -105,6 +104,7 @@ struct msi_desc {
};
int msi_maskable_irq(const struct msi_desc *);
+int msi_free_irq(struct msi_desc *entry);
/*
* Assume the maximum number of hot plug slots supported by the system is about
diff --git a/xen/include/xen/hvm/irq.h b/xen/include/xen/hvm/irq.h
index b50cff6da9..739507af95 100644
--- a/xen/include/xen/hvm/irq.h
+++ b/xen/include/xen/hvm/irq.h
@@ -88,7 +88,7 @@ struct hvm_irq_dpci {
DECLARE_BITMAP(isairq_map, NR_ISAIRQS);
/* Record of mapped Links */
uint8_t link_cnt[NR_LINK];
- struct timer hvm_timer[NR_VECTORS];
+ struct timer *hvm_timer;
struct tasklet dirq_tasklet;
};
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 3468ebcaaf..402914d603 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -53,7 +53,7 @@ struct iommu {
spinlock_t lock; /* protect context, domain ids */
spinlock_t register_lock; /* protect iommu register handling */
u64 root_maddr; /* root entry machine address */
- int vector;
+ int irq;
struct intel_iommu *intel;
};
diff --git a/xen/include/xen/irq.h b/xen/include/xen/irq.h
index 1380d5e1c0..bbf48adea0 100644
--- a/xen/include/xen/irq.h
+++ b/xen/include/xen/irq.h
@@ -53,6 +53,7 @@ typedef struct hw_interrupt_type hw_irq_controller;
# define nr_irqs_gsi NR_IRQS
#else
extern unsigned int nr_irqs_gsi;
+extern unsigned int nr_irqs;
#endif
struct msi_desc;
@@ -63,24 +64,20 @@ struct msi_desc;
*
* Pad this out to 32 bytes for cache and indexing reasons.
*/
-typedef struct {
+typedef struct irq_desc {
unsigned int status; /* IRQ status */
hw_irq_controller *handler;
struct msi_desc *msi_desc;
struct irqaction *action; /* IRQ action list */
unsigned int depth; /* nested irq disables */
+ int irq;
spinlock_t lock;
cpumask_t affinity;
} __cacheline_aligned irq_desc_t;
+#ifndef CONFIG_X86
extern irq_desc_t irq_desc[NR_VECTORS];
-extern int setup_irq_vector(unsigned int, struct irqaction *);
-extern void release_irq_vector(unsigned int);
-extern int request_irq_vector(unsigned int vector,
- void (*handler)(int, void *, struct cpu_user_regs *),
- unsigned long irqflags, const char * devname, void *dev_id);
-
#define setup_irq(irq, action) \
setup_irq_vector(irq_to_vector(irq), action)
@@ -90,6 +87,16 @@ extern int request_irq_vector(unsigned int vector,
#define request_irq(irq, handler, irqflags, devname, devid) \
request_irq_vector(irq_to_vector(irq), handler, irqflags, devname, devid)
+#else
+extern struct irq_desc *irq_desc;
+
+extern int setup_irq(unsigned int irq, struct irqaction *);
+extern void release_irq(unsigned int irq);
+extern int request_irq(unsigned int irq,
+ void (*handler)(int, void *, struct cpu_user_regs *),
+ unsigned long irqflags, const char * devname, void *dev_id);
+#endif
+
extern hw_irq_controller no_irq_type;
extern void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs);
@@ -102,16 +109,18 @@ extern void pirq_guest_unbind(struct domain *d, int irq);
extern irq_desc_t *domain_spin_lock_irq_desc(
struct domain *d, int irq, unsigned long *pflags);
-static inline void set_native_irq_info(unsigned int vector, cpumask_t mask)
+static inline void set_native_irq_info(unsigned int irq, cpumask_t mask)
{
- irq_desc[vector].affinity = mask;
+ irq_desc[irq].affinity = mask;
}
-#ifdef irq_to_vector
static inline void set_irq_info(int irq, cpumask_t mask)
{
+#ifdef CONFIG_X86
+ set_native_irq_info(irq, mask);
+#else
set_native_irq_info(irq_to_vector(irq), mask);
-}
#endif
+}
#endif /* __XEN_IRQ_H__ */
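
With the final shape of xen/irq.h, x86 descriptors are indexed by irq directly while other architectures still translate through irq_to_vector(). A standalone sketch of that split; irq_to_vector() here is a fake fixed offset, and building with and without -DCONFIG_X86 shows the difference:

    #include <stdio.h>

    #define NR 8
    static int affinity[NR];

    static int irq_to_vector(int irq) { return irq + 2; } /* stand-in mapping */

    static void set_native_irq_info(unsigned int idx, int mask)
    {
        affinity[idx] = mask;
    }

    static void set_irq_info(int irq, int mask)
    {
    #ifdef CONFIG_X86
        set_native_irq_info(irq, mask);                 /* irq-indexed */
    #else
        set_native_irq_info(irq_to_vector(irq), mask);  /* vector-indexed */
    #endif
    }

    int main(void)
    {
        int i;

        set_irq_info(1, 0x3);
        for ( i = 0; i < NR; i++ )
            if ( affinity[i] )
                printf("slot %d -> %#x\n", i, affinity[i]);
        return 0;
    }
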