-rw-r--r--  xen/arch/ia64/vmx/vmx_interrupt.c      |   4
-rw-r--r--  xen/arch/ia64/xen/hypercall.c          |   7
-rw-r--r--  xen/arch/ia64/xen/irq.c                |  46
-rw-r--r--  xen/arch/x86/domain.c                  |  38
-rw-r--r--  xen/arch/x86/hvm/hvm.c                 |  46
-rw-r--r--  xen/arch/x86/hvm/irq.c                 |   2
-rw-r--r--  xen/arch/x86/hvm/vmsi.c                |  25
-rw-r--r--  xen/arch/x86/irq.c                     | 358
-rw-r--r--  xen/arch/x86/physdev.c                 |  36
-rw-r--r--  xen/common/domain.c                    |  28
-rw-r--r--  xen/common/event_channel.c             |  42
-rw-r--r--  xen/common/radix-tree.c                |  44
-rw-r--r--  xen/common/tmem.c                      |   1
-rw-r--r--  xen/drivers/passthrough/io.c           | 386
-rw-r--r--  xen/drivers/passthrough/pci.c          |  41
-rw-r--r--  xen/drivers/passthrough/vtd/x86/vtd.c  |  51
-rw-r--r--  xen/include/asm-ia64/domain.h          |  18
-rw-r--r--  xen/include/asm-x86/domain.h           |   7
-rw-r--r--  xen/include/asm-x86/hvm/domain.h       |   3
-rw-r--r--  xen/include/asm-x86/hvm/irq.h          |   2
-rw-r--r--  xen/include/asm-x86/irq.h              |  31
-rw-r--r--  xen/include/xen/domain.h               |   6
-rw-r--r--  xen/include/xen/event.h                |   2
-rw-r--r--  xen/include/xen/hvm/irq.h              |  39
-rw-r--r--  xen/include/xen/iommu.h                |   4
-rw-r--r--  xen/include/xen/irq.h                  |  34
-rw-r--r--  xen/include/xen/pci.h                  |   5
-rw-r--r--  xen/include/xen/radix-tree.h           |   4
-rw-r--r--  xen/include/xen/sched.h                |   9
29 files changed, 506 insertions(+), 813 deletions(-)
diff --git a/xen/arch/ia64/vmx/vmx_interrupt.c b/xen/arch/ia64/vmx/vmx_interrupt.c
index 83bb36a907..dc00afb808 100644
--- a/xen/arch/ia64/vmx/vmx_interrupt.c
+++ b/xen/arch/ia64/vmx/vmx_interrupt.c
@@ -155,13 +155,13 @@ void hvm_isa_irq_deassert(struct domain *d, unsigned int isa_irq)
/* dummy */
}
-int msixtbl_pt_register(struct domain *d, struct pirq *pirq, uint64_t gtable)
+int msixtbl_pt_register(struct domain *d, int pirq, uint64_t gtable)
{
/* dummy */
return -ENOSYS;
}
-void msixtbl_pt_unregister(struct domain *d, struct pirq *pirq)
+void msixtbl_pt_unregister(struct domain *d, int pirq)
{
/* dummy */
}
diff --git a/xen/arch/ia64/xen/hypercall.c b/xen/arch/ia64/xen/hypercall.c
index 44c2a9c327..6ea15c290a 100644
--- a/xen/arch/ia64/xen/hypercall.c
+++ b/xen/arch/ia64/xen/hypercall.c
@@ -65,11 +65,8 @@ static long __do_pirq_guest_eoi(struct domain *d, int pirq)
{
if ( pirq < 0 || pirq >= NR_IRQS )
return -EINVAL;
- if ( d->arch.pirq_eoi_map ) {
- spin_lock(&d->event_lock);
- evtchn_unmask(pirq_to_evtchn(d, pirq));
- spin_unlock(&d->event_lock);
- }
+ if ( d->arch.pirq_eoi_map )
+ evtchn_unmask(d->pirq_to_evtchn[pirq]);
return pirq_guest_eoi(d, pirq);
}
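
[Note: the fast path restored here indexes a flat per-domain table instead of the radix-tree-backed struct pirq. A minimal sketch of the mapping it assumes (d->pirq_to_evtchn[] as reintroduced later in this patch); illustrative only, not part of the patch:]

    static inline int pirq_port(const struct domain *d, int pirq)
    {
        ASSERT(pirq >= 0 && pirq < d->nr_pirqs);
        return d->pirq_to_evtchn[pirq];   /* 0 == no event channel bound */
    }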
diff --git a/xen/arch/ia64/xen/irq.c b/xen/arch/ia64/xen/irq.c
index fd19db3a54..f8bdb44566 100644
--- a/xen/arch/ia64/xen/irq.c
+++ b/xen/arch/ia64/xen/irq.c
@@ -363,17 +363,15 @@ void __do_IRQ_guest(int irq)
irq_desc_t *desc = &irq_desc[irq];
irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
struct domain *d;
- struct pirq *pirq;
int i, already_pending = 0;
for ( i = 0; i < action->nr_guests; i++ )
{
d = action->guest[i];
- pirq = pirq_info(d, irq);
if ( (action->ack_type != ACKTYPE_NONE) &&
- !test_and_set_bool(pirq->masked) )
+ !test_and_set_bit(irq, &d->pirq_mask) )
action->in_flight++;
- if ( hvm_do_IRQ_dpci(d, pirq) )
+ if ( hvm_do_IRQ_dpci(d, irq) )
{
if ( action->ack_type == ACKTYPE_NONE )
{
@@ -381,7 +379,7 @@ void __do_IRQ_guest(int irq)
desc->status |= IRQ_INPROGRESS; /* cleared during hvm eoi */
}
}
- else if ( send_guest_pirq(d, pirq) &&
+ else if ( send_guest_pirq(d, irq) &&
(action->ack_type == ACKTYPE_NONE) )
{
already_pending++;
@@ -425,23 +423,26 @@ static int pirq_acktype(int irq)
return ACKTYPE_NONE;
}
-int pirq_guest_eoi(struct domain *d, struct pirq *pirq)
+int pirq_guest_eoi(struct domain *d, int irq)
{
irq_desc_t *desc;
irq_guest_action_t *action;
+ if ( (irq < 0) || (irq >= NR_IRQS) )
+ return -EINVAL;
+
desc = &irq_desc[irq];
spin_lock_irq(&desc->lock);
action = (irq_guest_action_t *)desc->action;
if ( action->ack_type == ACKTYPE_NONE )
{
- ASSERT(!pirq->masked);
+ ASSERT(!test_bit(irq, d->pirq_mask));
stop_timer(&irq_guest_eoi_timer[irq]);
_irq_guest_eoi(desc);
}
- if ( test_and_clear_bool(pirq->masked) && (--action->in_flight == 0) )
+ if ( test_and_clear_bit(irq, &d->pirq_mask) && (--action->in_flight == 0) )
{
ASSERT(action->ack_type == ACKTYPE_UNMASK);
desc->handler->end(irq);
@@ -454,27 +455,22 @@ int pirq_guest_eoi(struct domain *d, struct pirq *pirq)
int pirq_guest_unmask(struct domain *d)
{
- unsigned int pirq = 0, n, i;
- unsigned long indexes[16];
- struct pirq *pirqs[ARRAY_SIZE(indexes)];
+ int irq;
shared_info_t *s = d->shared_info;
- do {
- n = radix_tree_gang_lookup(&d->pirq_tree, (void **)pirqs, pirq,
- ARRAY_SIZE(pirqs), indexes);
- for ( i = 0; i < n; ++i )
- {
- pirq = indexes[i];
- if ( pirqs[i]->masked &&
- !test_bit(pirqs[i]->evtchn, &s->evtchn_mask[0]) )
- pirq_guest_eoi(d, pirqs[i]);
- }
- } while ( ++pirq < d->nr_pirqs && n == ARRAY_SIZE(pirqs) );
+ for ( irq = find_first_bit(d->pirq_mask, NR_IRQS);
+ irq < NR_IRQS;
+ irq = find_next_bit(d->pirq_mask, NR_IRQS, irq+1) )
+ {
+ if ( !test_bit(d->pirq_to_evtchn[irq], &s->evtchn_mask[0]) )
+ pirq_guest_eoi(d, irq);
+
+ }
return 0;
}
-int pirq_guest_bind(struct vcpu *v, int irq, struct pirq *pirq, int will_share)
+int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
{
irq_desc_t *desc = &irq_desc[irq];
irq_guest_action_t *action;
@@ -558,7 +554,7 @@ int pirq_guest_bind(struct vcpu *v, int irq, struct pirq *pirq, int will_share)
return rc;
}
-void pirq_guest_unbind(struct domain *d, int irq, struct pirq *pirq)
+void pirq_guest_unbind(struct domain *d, int irq)
{
irq_desc_t *desc = &irq_desc[irq];
irq_guest_action_t *action;
@@ -576,7 +572,7 @@ void pirq_guest_unbind(struct domain *d, int irq, struct pirq *pirq)
action->nr_guests--;
if ( action->ack_type == ACKTYPE_UNMASK )
- if ( test_and_clear_bool(pirq->masked) &&
+ if ( test_and_clear_bit(irq, &d->pirq_mask) &&
(--action->in_flight == 0) )
desc->handler->end(irq);
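
[Note: the accounting above pairs one test_and_set_bit() at delivery with one test_and_clear_bit() at EOI. Condensed to its core, with the names as used in this file (illustrative, not additional patch content):]

    /* Delivery (__do_IRQ_guest): count the PIRQ in flight at most once. */
    if ( action->ack_type != ACKTYPE_NONE &&
         !test_and_set_bit(irq, &d->pirq_mask) )
        action->in_flight++;

    /* EOI (pirq_guest_eoi / pirq_guest_unbind): unwind it, and end the
     * line once the last in-flight guest has EOIed under ACKTYPE_UNMASK. */
    if ( test_and_clear_bit(irq, &d->pirq_mask) &&
         (--action->in_flight == 0) )
        desc->handler->end(irq);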
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 6a3b12ec2e..f1d9247cc9 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -608,8 +608,34 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
share_xen_page_with_guest(
virt_to_page(d->shared_info), d, XENSHARE_writable);
- if ( (rc = init_domain_irq_mapping(d)) != 0 )
+ d->arch.pirq_irq = xmalloc_array(int, d->nr_pirqs);
+ if ( !d->arch.pirq_irq )
goto fail;
+ memset(d->arch.pirq_irq, 0,
+ d->nr_pirqs * sizeof(*d->arch.pirq_irq));
+
+ d->arch.irq_pirq = xmalloc_array(int, nr_irqs);
+ if ( !d->arch.irq_pirq )
+ goto fail;
+ memset(d->arch.irq_pirq, 0,
+ nr_irqs * sizeof(*d->arch.irq_pirq));
+
+ for ( i = 1; platform_legacy_irq(i); ++i )
+ if ( !IO_APIC_IRQ(i) )
+ d->arch.irq_pirq[i] = d->arch.pirq_irq[i] = i;
+
+ if ( is_hvm_domain(d) )
+ {
+ d->arch.pirq_emuirq = xmalloc_array(int, d->nr_pirqs);
+ d->arch.emuirq_pirq = xmalloc_array(int, nr_irqs);
+ if ( !d->arch.pirq_emuirq || !d->arch.emuirq_pirq )
+ goto fail;
+ for (i = 0; i < d->nr_pirqs; i++)
+ d->arch.pirq_emuirq[i] = IRQ_UNBOUND;
+ for (i = 0; i < nr_irqs; i++)
+ d->arch.emuirq_pirq[i] = IRQ_UNBOUND;
+ }
+
if ( (rc = iommu_domain_init(d)) != 0 )
goto fail;
@@ -644,7 +670,10 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
fail:
d->is_dying = DOMDYING_dead;
vmce_destroy_msr(d);
- cleanup_domain_irq_mapping(d);
+ xfree(d->arch.pirq_irq);
+ xfree(d->arch.irq_pirq);
+ xfree(d->arch.pirq_emuirq);
+ xfree(d->arch.emuirq_pirq);
free_xenheap_page(d->shared_info);
if ( paging_initialised )
paging_final_teardown(d);
@@ -696,7 +725,10 @@ void arch_domain_destroy(struct domain *d)
#endif
free_xenheap_page(d->shared_info);
- cleanup_domain_irq_mapping(d);
+ xfree(d->arch.pirq_irq);
+ xfree(d->arch.irq_pirq);
+ xfree(d->arch.pirq_emuirq);
+ xfree(d->arch.emuirq_pirq);
}
unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long guest_cr4)
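
[Note: the failure path and arch_domain_destroy() free the same four tables. A helper of the following shape (hypothetical, shown only to make the pairing explicit) would work because xfree(NULL) is a no-op, covering PV domains that never allocate the emuirq tables:]

    static void free_domain_irq_arrays(struct domain *d)
    {
        xfree(d->arch.pirq_irq);      /* PIRQ -> machine IRQ */
        xfree(d->arch.irq_pirq);      /* machine IRQ -> PIRQ */
        xfree(d->arch.pirq_emuirq);   /* HVM only; NULL for PV */
        xfree(d->arch.emuirq_pirq);   /* HVM only; NULL for PV */
    }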
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 744e3df6b5..085230322b 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -252,36 +252,32 @@ void hvm_migrate_timers(struct vcpu *v)
pt_migrate(v);
}
-static int hvm_migrate_pirq(struct domain *d, unsigned int pirq,
- struct hvm_pirq_dpci *pirq_dpci, void *arg)
-{
- struct vcpu *v = arg;
-
- if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI) &&
- (pirq_dpci->gmsi.dest_vcpu_id == v->vcpu_id) )
- {
- struct irq_desc *desc =
- pirq_spin_lock_irq_desc(d, dpci_pirq(pirq_dpci), NULL);
-
- if ( !desc )
- return 0;
- ASSERT(MSI_IRQ(desc - irq_desc));
- irq_set_affinity(desc, cpumask_of(v->processor));
- spin_unlock_irq(&desc->lock);
- }
-
- return 0;
-}
-
void hvm_migrate_pirqs(struct vcpu *v)
{
+ int pirq, irq;
+ struct irq_desc *desc;
struct domain *d = v->domain;
-
- if ( !iommu_enabled || !d->arch.hvm_domain.irq.dpci )
+ struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+
+ if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
return;
spin_lock(&d->event_lock);
- pt_pirq_iterate(d, hvm_migrate_pirq, v);
+ for ( pirq = find_first_bit(hvm_irq_dpci->mapping, d->nr_pirqs);
+ pirq < d->nr_pirqs;
+ pirq = find_next_bit(hvm_irq_dpci->mapping, d->nr_pirqs, pirq + 1) )
+ {
+ if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MACH_MSI) ||
+ (hvm_irq_dpci->mirq[pirq].gmsi.dest_vcpu_id != v->vcpu_id) )
+ continue;
+ desc = domain_spin_lock_irq_desc(v->domain, pirq, NULL);
+ if (!desc)
+ continue;
+ irq = desc - irq_desc;
+ ASSERT(MSI_IRQ(irq));
+ irq_set_affinity(desc, cpumask_of(v->processor));
+ spin_unlock_irq(&desc->lock);
+ }
spin_unlock(&d->event_lock);
}
@@ -505,6 +501,8 @@ int hvm_domain_initialise(struct domain *d)
return rc;
}
+extern void msixtbl_pt_cleanup(struct domain *d);
+
void hvm_domain_relinquish_resources(struct domain *d)
{
hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.ioreq);
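
[Note: hvm_migrate_pirqs() now open-codes the walk that pt_pirq_iterate() used to hide; the same idiom recurs in hvm_dpci_msi_eoi() and hvm_dirq_assist() later in this patch. In isolation:]

    /* Visit every set bit in map[0..nr): the bitmap-walk idiom this
     * patch substitutes for the radix-tree gang lookups. */
    for ( pirq = find_first_bit(map, nr);
          pirq < nr;
          pirq = find_next_bit(map, nr, pirq + 1) )
        /* ... act on pirq ... */;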
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 61f839b570..f1deada076 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -33,7 +33,7 @@ static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq)
int pirq = domain_emuirq_to_pirq(d, ioapic_gsi);
if ( pirq != IRQ_UNBOUND )
{
- send_guest_pirq(d, pirq_info(d, pirq));
+ send_guest_pirq(d, pirq);
return;
}
vioapic_irq_positive_edge(d, ioapic_gsi);
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index f037af88fa..8ed26f6106 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -65,10 +65,11 @@ static void vmsi_inj_irq(
}
}
-int vmsi_deliver(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci)
+int vmsi_deliver(struct domain *d, int pirq)
{
- uint32_t flags = pirq_dpci->gmsi.gflags;
- int vector = pirq_dpci->gmsi.gvec;
+ struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+ uint32_t flags = hvm_irq_dpci->mirq[pirq].gmsi.gflags;
+ int vector = hvm_irq_dpci->mirq[pirq].gmsi.gvec;
uint8_t dest = (uint8_t)flags;
uint8_t dest_mode = !!(flags & VMSI_DM_MASK);
uint8_t delivery_mode = (flags & VMSI_DELIV_MASK) >> GLFAGS_SHIFT_DELIV_MODE;
@@ -81,7 +82,11 @@ int vmsi_deliver(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci)
"vector=%x trig_mode=%x\n",
dest, dest_mode, delivery_mode, vector, trig_mode);
- ASSERT(pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI);
+ if ( !( hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_GUEST_MSI ) )
+ {
+ gdprintk(XENLOG_WARNING, "pirq %x not msi \n", pirq);
+ return 0;
+ }
switch ( delivery_mode )
{
@@ -344,7 +349,7 @@ static void del_msixtbl_entry(struct msixtbl_entry *entry)
call_rcu(&entry->rcu, free_msixtbl_entry);
}
-int msixtbl_pt_register(struct domain *d, struct pirq *pirq, uint64_t gtable)
+int msixtbl_pt_register(struct domain *d, int pirq, uint64_t gtable)
{
struct irq_desc *irq_desc;
struct msi_desc *msi_desc;
@@ -353,7 +358,6 @@ int msixtbl_pt_register(struct domain *d, struct pirq *pirq, uint64_t gtable)
int r = -EINVAL;
ASSERT(spin_is_locked(&pcidevs_lock));
- ASSERT(spin_is_locked(&d->event_lock));
/*
* xmalloc() with irq_disabled causes the failure of check_lock()
@@ -363,7 +367,7 @@ int msixtbl_pt_register(struct domain *d, struct pirq *pirq, uint64_t gtable)
if ( !new_entry )
return -ENOMEM;
- irq_desc = pirq_spin_lock_irq_desc(d, pirq, NULL);
+ irq_desc = domain_spin_lock_irq_desc(d, pirq, NULL);
if ( !irq_desc )
{
xfree(new_entry);
@@ -400,7 +404,7 @@ out:
return r;
}
-void msixtbl_pt_unregister(struct domain *d, struct pirq *pirq)
+void msixtbl_pt_unregister(struct domain *d, int pirq)
{
struct irq_desc *irq_desc;
struct msi_desc *msi_desc;
@@ -408,9 +412,8 @@ void msixtbl_pt_unregister(struct domain *d, struct pirq *pirq)
struct msixtbl_entry *entry;
ASSERT(spin_is_locked(&pcidevs_lock));
- ASSERT(spin_is_locked(&d->event_lock));
- irq_desc = pirq_spin_lock_irq_desc(d, pirq, NULL);
+ irq_desc = domain_spin_lock_irq_desc(d, pirq, NULL);
if ( !irq_desc )
return;
@@ -444,7 +447,7 @@ found:
spin_unlock_irq(&irq_desc->lock);
}
-void msixtbl_pt_cleanup(struct domain *d)
+void msixtbl_pt_cleanup(struct domain *d, int pirq)
{
struct msixtbl_entry *entry, *temp;
unsigned long flags;
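
[Note: vmsi_deliver() unpacks all guest MSI attributes from the single gmsi.gflags word. A plausible reconstruction of the full decode, including the trig_mode line not visible in the hunk above, using the mask/shift names from xen/include/asm-x86/hvm/irq.h (GLFAGS_* is the header's own spelling):]

    uint32_t flags     = hvm_irq_dpci->mirq[pirq].gmsi.gflags;
    uint8_t  dest      = (uint8_t)flags;           /* bits 0-7: dest ID */
    uint8_t  dest_mode = !!(flags & VMSI_DM_MASK);
    uint8_t  delivery  = (flags & VMSI_DELIV_MASK) >> GLFAGS_SHIFT_DELIV_MODE;
    uint8_t  trig_mode = (flags & VMSI_TRIG_MODE) >> GLFAGS_SHIFT_TRG_MODE;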
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 2f40b8552e..68aec1e9e6 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -814,7 +814,7 @@ static void irq_guest_eoi_timer_fn(void *data)
{
struct domain *d = action->guest[i];
unsigned int pirq = domain_irq_to_pirq(d, irq);
- if ( test_and_clear_bool(pirq_info(d, pirq)->masked) )
+ if ( test_and_clear_bit(pirq, d->pirq_mask) )
action->in_flight--;
}
}
@@ -874,12 +874,11 @@ static void __do_IRQ_guest(int irq)
for ( i = 0; i < action->nr_guests; i++ )
{
- struct pirq *pirq;
-
+ unsigned int pirq;
d = action->guest[i];
- pirq = pirq_info(d, domain_irq_to_pirq(d, irq));
+ pirq = domain_irq_to_pirq(d, irq);
if ( (action->ack_type != ACKTYPE_NONE) &&
- !test_and_set_bool(pirq->masked) )
+ !test_and_set_bit(pirq, d->pirq_mask) )
action->in_flight++;
if ( hvm_do_IRQ_dpci(d, pirq) )
{
@@ -951,139 +950,6 @@ struct irq_desc *domain_spin_lock_irq_desc(
return desc;
}
-/*
- * Same with struct pirq already looked up, and d->event_lock already
- * held (thus the PIRQ <-> IRQ mapping can't change under our feet).
- */
-struct irq_desc *pirq_spin_lock_irq_desc(
- struct domain *d, const struct pirq *pirq, unsigned long *pflags)
-{
- int irq = pirq->arch.irq;
- struct irq_desc *desc;
- unsigned long flags;
-
- ASSERT(spin_is_locked(&d->event_lock));
-
- if ( irq <= 0 )
- return NULL;
-
- desc = irq_to_desc(irq);
- spin_lock_irqsave(&desc->lock, flags);
-
- if ( pflags )
- *pflags = flags;
-
- ASSERT(pirq == pirq_info(d, domain_irq_to_pirq(d, irq)));
- ASSERT(irq == pirq->arch.irq);
-
- return desc;
-}
-
-static int set_domain_irq_pirq(struct domain *d, int irq, int pirq)
-{
- int err = radix_tree_insert(&d->arch.irq_pirq, irq, (void *)(long)pirq,
- NULL, NULL);
-
- switch ( err )
- {
- struct pirq *info;
-
- case -EEXIST:
- *radix_tree_lookup_slot(&d->arch.irq_pirq, irq) = (void *)(long)pirq;
- /* fall through */
- case 0:
- info = pirq_get_info(d, pirq);
- if ( info )
- {
- info->arch.irq = irq;
- return 0;
- }
- radix_tree_delete(&d->arch.irq_pirq, irq, NULL);
- err = -ENOMEM;
- break;
- }
-
- return err;
-}
-
-static void clear_domain_irq_pirq(struct domain *d, int irq, int pirq,
- struct pirq *info)
-{
- info->arch.irq = 0;
- pirq_cleanup_check(info, d, pirq);
- radix_tree_delete(&d->arch.irq_pirq, irq, NULL);
-}
-
-int init_domain_irq_mapping(struct domain *d)
-{
- unsigned int i;
- int err;
-
- INIT_RADIX_TREE(&d->arch.irq_pirq, 0);
- if ( is_hvm_domain(d) )
- INIT_RADIX_TREE(&d->arch.hvm_domain.emuirq_pirq, 0);
-
- for ( i = 1, err = 0; !err && platform_legacy_irq(i); ++i )
- if ( !IO_APIC_IRQ(i) )
- err = set_domain_irq_pirq(d, i, i);
-
- return err;
-}
-
-static void irq_slot_free(void *unused)
-{
-}
-
-void cleanup_domain_irq_mapping(struct domain *d)
-{
- radix_tree_destroy(&d->arch.irq_pirq, irq_slot_free, NULL);
- if ( is_hvm_domain(d) )
- radix_tree_destroy(&d->arch.hvm_domain.emuirq_pirq,
- irq_slot_free, NULL);
-}
-
-struct pirq *alloc_pirq_struct(struct domain *d)
-{
- size_t sz = is_hvm_domain(d) ? sizeof(struct pirq) :
- offsetof(struct pirq, arch.hvm);
- struct pirq *pirq = xmalloc_bytes(sz);
-
- if ( pirq )
- {
- memset(pirq, 0, sz);
- if ( is_hvm_domain(d) )
- {
- pirq->arch.hvm.emuirq = IRQ_UNBOUND;
- pt_pirq_init(d, &pirq->arch.hvm.dpci);
- }
- }
-
- return pirq;
-}
-
-void (pirq_cleanup_check)(struct pirq *info, struct domain *d, int pirq)
-{
- /*
- * Check whether all fields have their default values, and delete
- * the entry from the tree if so.
- *
- * NB: Common parts were already checked.
- */
- if ( info->arch.irq )
- return;
-
- if ( is_hvm_domain(d) )
- {
- if ( info->arch.hvm.emuirq != IRQ_UNBOUND )
- return;
- if ( !pt_pirq_cleanup_check(&info->arch.hvm.dpci) )
- return;
- }
-
- if ( radix_tree_delete(&d->pirq_tree, pirq, NULL) != info )
- BUG();
-}
-
/* Flush all ready EOIs from the top of this CPU's pending-EOI stack. */
static void flush_ready_eoi(void)
{
@@ -1144,22 +1010,18 @@ static void set_eoi_ready(void *data)
flush_ready_eoi();
}
-void pirq_guest_eoi(struct domain *d, struct pirq *pirq)
-{
- struct irq_desc *desc;
-
- ASSERT(local_irq_is_enabled());
- desc = pirq_spin_lock_irq_desc(d, pirq, NULL);
- if ( desc )
- desc_guest_eoi(d, desc, pirq);
-}
-
-void desc_guest_eoi(struct domain *d, struct irq_desc *desc, struct pirq *pirq)
+static void __pirq_guest_eoi(struct domain *d, int pirq)
{
+ struct irq_desc *desc;
irq_guest_action_t *action;
cpumask_t cpu_eoi_map;
int irq;
+ ASSERT(local_irq_is_enabled());
+ desc = domain_spin_lock_irq_desc(d, pirq, NULL);
+ if ( desc == NULL )
+ return;
+
if ( !(desc->status & IRQ_GUEST) )
{
spin_unlock_irq(&desc->lock);
@@ -1171,12 +1033,12 @@ void desc_guest_eoi(struct domain *d, struct irq_desc *desc, struct pirq *pirq)
if ( action->ack_type == ACKTYPE_NONE )
{
- ASSERT(!pirq->masked);
+ ASSERT(!test_bit(pirq, d->pirq_mask));
stop_timer(&action->eoi_timer);
_irq_guest_eoi(desc);
}
- if ( unlikely(!test_and_clear_bool(pirq->masked)) ||
+ if ( unlikely(!test_and_clear_bit(pirq, d->pirq_mask)) ||
unlikely(--action->in_flight != 0) )
{
spin_unlock_irq(&desc->lock);
@@ -1211,23 +1073,27 @@ void desc_guest_eoi(struct domain *d, struct irq_desc *desc, struct pirq *pirq)
on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
}
+int pirq_guest_eoi(struct domain *d, int irq)
+{
+ if ( (irq < 0) || (irq >= d->nr_pirqs) )
+ return -EINVAL;
+
+ __pirq_guest_eoi(d, irq);
+
+ return 0;
+}
+
int pirq_guest_unmask(struct domain *d)
{
- unsigned int pirq = 0, n, i;
- unsigned long indexes[16];
- struct pirq *pirqs[ARRAY_SIZE(indexes)];
+ unsigned int irq, nr = d->nr_pirqs;
- do {
- n = radix_tree_gang_lookup(&d->pirq_tree, (void **)pirqs, pirq,
- ARRAY_SIZE(pirqs), indexes);
- for ( i = 0; i < n; ++i )
- {
- pirq = indexes[i];
- if ( pirqs[i]->masked &&
- !test_bit(pirqs[i]->evtchn, &shared_info(d, evtchn_mask)) )
- pirq_guest_eoi(d, pirqs[i]);
- }
- } while ( ++pirq < d->nr_pirqs && n == ARRAY_SIZE(pirqs) );
+ for ( irq = find_first_bit(d->pirq_mask, nr);
+ irq < nr;
+ irq = find_next_bit(d->pirq_mask, nr, irq+1) )
+ {
+ if ( !test_bit(d->pirq_to_evtchn[irq], &shared_info(d, evtchn_mask)) )
+ __pirq_guest_eoi(d, irq);
+ }
return 0;
}
@@ -1297,7 +1163,7 @@ int pirq_shared(struct domain *d, int pirq)
return shared;
}
-int pirq_guest_bind(struct vcpu *v, int pirq, struct pirq *info, int will_share)
+int pirq_guest_bind(struct vcpu *v, int pirq, int will_share)
{
unsigned int irq;
struct irq_desc *desc;
@@ -1309,7 +1175,7 @@ int pirq_guest_bind(struct vcpu *v, int pirq, struct pirq *info, int will_share)
BUG_ON(!local_irq_is_enabled());
retry:
- desc = pirq_spin_lock_irq_desc(v->domain, info, NULL);
+ desc = domain_spin_lock_irq_desc(v->domain, pirq, NULL);
if ( desc == NULL )
{
rc = -EINVAL;
@@ -1410,7 +1276,7 @@ int pirq_guest_bind(struct vcpu *v, int pirq, struct pirq *info, int will_share)
}
static irq_guest_action_t *__pirq_guest_unbind(
- struct domain *d, int pirq, struct pirq *info, struct irq_desc *desc)
+ struct domain *d, int pirq, struct irq_desc *desc)
{
unsigned int irq;
irq_guest_action_t *action;
@@ -1439,13 +1305,13 @@ static irq_guest_action_t *__pirq_guest_unbind(
switch ( action->ack_type )
{
case ACKTYPE_UNMASK:
- if ( test_and_clear_bool(info->masked) &&
+ if ( test_and_clear_bit(pirq, d->pirq_mask) &&
(--action->in_flight == 0) )
desc->handler->end(irq);
break;
case ACKTYPE_EOI:
/* NB. If #guests == 0 then we clear the eoi_map later on. */
- if ( test_and_clear_bool(info->masked) &&
+ if ( test_and_clear_bit(pirq, d->pirq_mask) &&
(--action->in_flight == 0) &&
(action->nr_guests != 0) )
{
@@ -1463,9 +1329,9 @@ static irq_guest_action_t *__pirq_guest_unbind(
/*
* The guest cannot re-bind to this IRQ until this function returns. So,
- * when we have flushed this IRQ from ->masked, it should remain flushed.
+ * when we have flushed this IRQ from pirq_mask, it should remain flushed.
*/
- BUG_ON(info->masked);
+ BUG_ON(test_bit(pirq, d->pirq_mask));
if ( action->nr_guests != 0 )
return NULL;
@@ -1503,7 +1369,7 @@ static irq_guest_action_t *__pirq_guest_unbind(
return action;
}
-void pirq_guest_unbind(struct domain *d, int pirq, struct pirq *info)
+void pirq_guest_unbind(struct domain *d, int pirq)
{
irq_guest_action_t *oldaction = NULL;
struct irq_desc *desc;
@@ -1512,19 +1378,19 @@ void pirq_guest_unbind(struct domain *d, int pirq, struct pirq *info)
WARN_ON(!spin_is_locked(&d->event_lock));
BUG_ON(!local_irq_is_enabled());
- desc = pirq_spin_lock_irq_desc(d, info, NULL);
+ desc = domain_spin_lock_irq_desc(d, pirq, NULL);
if ( desc == NULL )
{
- irq = -info->arch.irq;
+ irq = -domain_pirq_to_irq(d, pirq);
BUG_ON(irq <= 0);
desc = irq_to_desc(irq);
spin_lock_irq(&desc->lock);
- clear_domain_irq_pirq(d, irq, pirq, info);
+ d->arch.pirq_irq[pirq] = d->arch.irq_pirq[irq] = 0;
}
else
{
- oldaction = __pirq_guest_unbind(d, pirq, info, desc);
+ oldaction = __pirq_guest_unbind(d, pirq, desc);
}
spin_unlock_irq(&desc->lock);
@@ -1536,7 +1402,7 @@ void pirq_guest_unbind(struct domain *d, int pirq, struct pirq *info)
}
}
-static int pirq_guest_force_unbind(struct domain *d, int irq, struct pirq *info)
+static int pirq_guest_force_unbind(struct domain *d, int irq)
{
struct irq_desc *desc;
irq_guest_action_t *action, *oldaction = NULL;
@@ -1545,7 +1411,7 @@ static int pirq_guest_force_unbind(struct domain *d, int irq, struct pirq *info)
WARN_ON(!spin_is_locked(&d->event_lock));
BUG_ON(!local_irq_is_enabled());
- desc = pirq_spin_lock_irq_desc(d, info, NULL);
+ desc = domain_spin_lock_irq_desc(d, irq, NULL);
BUG_ON(desc == NULL);
if ( !(desc->status & IRQ_GUEST) )
@@ -1565,7 +1431,7 @@ static int pirq_guest_force_unbind(struct domain *d, int irq, struct pirq *info)
goto out;
bound = 1;
- oldaction = __pirq_guest_unbind(d, irq, info, desc);
+ oldaction = __pirq_guest_unbind(d, irq, desc);
out:
spin_unlock_irq(&desc->lock);
@@ -1579,13 +1445,6 @@ static int pirq_guest_force_unbind(struct domain *d, int irq, struct pirq *info)
return bound;
}
-static inline bool_t is_free_pirq(const struct domain *d,
- const struct pirq *pirq)
-{
- return !pirq || (!pirq->arch.irq && (!is_hvm_domain(d) ||
- pirq->arch.hvm.emuirq == IRQ_UNBOUND));
-}
-
int get_free_pirq(struct domain *d, int type, int index)
{
int i;
@@ -1595,17 +1454,29 @@ int get_free_pirq(struct domain *d, int type, int index)
if ( type == MAP_PIRQ_TYPE_GSI )
{
for ( i = 16; i < nr_irqs_gsi; i++ )
- if ( is_free_pirq(d, pirq_info(d, i)) )
- return i;
+ if ( !d->arch.pirq_irq[i] )
+ {
+ if ( !is_hvm_domain(d) ||
+ d->arch.pirq_emuirq[i] == IRQ_UNBOUND )
+ break;
+ }
+ if ( i == nr_irqs_gsi )
+ return -ENOSPC;
}
else
{
for ( i = d->nr_pirqs - 1; i >= nr_irqs_gsi; i-- )
- if ( is_free_pirq(d, pirq_info(d, i)) )
- return i;
+ if ( !d->arch.pirq_irq[i] )
+ {
+ if ( !is_hvm_domain(d) ||
+ d->arch.pirq_emuirq[i] == IRQ_UNBOUND )
+ break;
+ }
+ if ( i < nr_irqs_gsi )
+ return -ENOSPC;
}
- return -ENOSPC;
+ return i;
}
int map_domain_pirq(
@@ -1673,23 +1544,15 @@ int map_domain_pirq(
dprintk(XENLOG_G_ERR, "dom%d: irq %d in use\n",
d->domain_id, irq);
desc->handler = &pci_msi_type;
- ret = set_domain_irq_pirq(d, irq, pirq);
- if ( !ret )
- {
- setup_msi_irq(pdev, msi_desc, irq);
- spin_unlock_irqrestore(&desc->lock, flags);
- }
- else
- {
- desc->handler = &no_irq_type;
- spin_unlock_irqrestore(&desc->lock, flags);
- pci_disable_msi(msi_desc);
- }
- }
- else
+ d->arch.pirq_irq[pirq] = irq;
+ d->arch.irq_pirq[irq] = pirq;
+ setup_msi_irq(pdev, msi_desc, irq);
+ spin_unlock_irqrestore(&desc->lock, flags);
+ } else
{
spin_lock_irqsave(&desc->lock, flags);
- ret = set_domain_irq_pirq(d, irq, pirq);
+ d->arch.pirq_irq[pirq] = irq;
+ d->arch.irq_pirq[irq] = pirq;
spin_unlock_irqrestore(&desc->lock, flags);
}
@@ -1704,7 +1567,6 @@ int unmap_domain_pirq(struct domain *d, int pirq)
struct irq_desc *desc;
int irq, ret = 0;
bool_t forced_unbind;
- struct pirq *info;
struct msi_desc *msi_desc = NULL;
if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
@@ -1713,8 +1575,8 @@ int unmap_domain_pirq(struct domain *d, int pirq)
ASSERT(spin_is_locked(&pcidevs_lock));
ASSERT(spin_is_locked(&d->event_lock));
- info = pirq_info(d, pirq);
- if ( !info || (irq = info->arch.irq) <= 0 )
+ irq = domain_pirq_to_irq(d, pirq);
+ if ( irq <= 0 )
{
dprintk(XENLOG_G_ERR, "dom%d: pirq %d not mapped\n",
d->domain_id, pirq);
@@ -1722,7 +1584,7 @@ int unmap_domain_pirq(struct domain *d, int pirq)
goto done;
}
- forced_unbind = pirq_guest_force_unbind(d, pirq, info);
+ forced_unbind = pirq_guest_force_unbind(d, pirq);
if ( forced_unbind )
dprintk(XENLOG_G_WARNING, "dom%d: forcing unbind of pirq %d\n",
d->domain_id, pirq);
@@ -1737,11 +1599,14 @@ int unmap_domain_pirq(struct domain *d, int pirq)
BUG_ON(irq != domain_pirq_to_irq(d, pirq));
if ( !forced_unbind )
- clear_domain_irq_pirq(d, irq, pirq, info);
+ {
+ d->arch.pirq_irq[pirq] = 0;
+ d->arch.irq_pirq[irq] = 0;
+ }
else
{
- info->arch.irq = -irq;
- *radix_tree_lookup_slot(&d->arch.irq_pirq, irq) = (void *)(long)-pirq;
+ d->arch.pirq_irq[pirq] = -irq;
+ d->arch.irq_pirq[irq] = -pirq;
}
spin_unlock_irqrestore(&desc->lock, flags);
@@ -1768,7 +1633,7 @@ void free_domain_pirqs(struct domain *d)
spin_lock(&d->event_lock);
for ( i = 0; i < d->nr_pirqs; i++ )
- if ( domain_pirq_to_irq(d, i) > 0 )
+ if ( d->arch.pirq_irq[i] > 0 )
unmap_domain_pirq(d, i);
spin_unlock(&d->event_lock);
@@ -1784,7 +1649,6 @@ static void dump_irqs(unsigned char key)
struct irq_cfg *cfg;
irq_guest_action_t *action;
struct domain *d;
- const struct pirq *info;
unsigned long flags;
printk("Guest interrupt information:\n");
@@ -1819,18 +1683,20 @@ static void dump_irqs(unsigned char key)
{
d = action->guest[i];
pirq = domain_irq_to_pirq(d, irq);
- info = pirq_info(d, pirq);
printk("%u:%3d(%c%c%c%c)",
d->domain_id, pirq,
- (test_bit(info->evtchn,
+ (test_bit(d->pirq_to_evtchn[pirq],
&shared_info(d, evtchn_pending)) ?
'P' : '-'),
- (test_bit(info->evtchn / BITS_PER_EVTCHN_WORD(d),
+ (test_bit(d->pirq_to_evtchn[pirq] /
+ BITS_PER_EVTCHN_WORD(d),
&vcpu_info(d->vcpu[0], evtchn_pending_sel)) ?
'S' : '-'),
- (test_bit(info->evtchn, &shared_info(d, evtchn_mask)) ?
+ (test_bit(d->pirq_to_evtchn[pirq],
+ &shared_info(d, evtchn_mask)) ?
'M' : '-'),
- (info->masked ? 'M' : '-'));
+ (test_bit(pirq, d->pirq_mask) ?
+ 'M' : '-'));
if ( i != action->nr_guests )
printk(",");
}
@@ -1937,7 +1803,6 @@ void fixup_irqs(void)
int map_domain_emuirq_pirq(struct domain *d, int pirq, int emuirq)
{
int old_emuirq = IRQ_UNBOUND, old_pirq = IRQ_UNBOUND;
- struct pirq *info;
ASSERT(spin_is_locked(&d->event_lock));
@@ -1964,30 +1829,10 @@ int map_domain_emuirq_pirq(struct domain *d, int pirq, int emuirq)
return 0;
}
- info = pirq_get_info(d, pirq);
- if ( !info )
- return -ENOMEM;
-
+ d->arch.pirq_emuirq[pirq] = emuirq;
/* do not store emuirq mappings for pt devices */
if ( emuirq != IRQ_PT )
- {
- int err = radix_tree_insert(&d->arch.hvm_domain.emuirq_pirq, emuirq,
- (void *)((long)pirq + 1), NULL, NULL);
-
- switch ( err )
- {
- case 0:
- break;
- case -EEXIST:
- *radix_tree_lookup_slot(&d->arch.hvm_domain.emuirq_pirq, emuirq) =
- (void *)((long)pirq + 1);
- break;
- default:
- pirq_cleanup_check(info, d, pirq);
- return err;
- }
- }
- info->arch.hvm.emuirq = emuirq;
+ d->arch.emuirq_pirq[emuirq] = pirq;
return 0;
}
@@ -1995,7 +1840,6 @@ int map_domain_emuirq_pirq(struct domain *d, int pirq, int emuirq)
int unmap_domain_pirq_emuirq(struct domain *d, int pirq)
{
int emuirq, ret = 0;
- struct pirq *info;
if ( !is_hvm_domain(d) )
return -EINVAL;
@@ -2014,22 +1858,24 @@ int unmap_domain_pirq_emuirq(struct domain *d, int pirq)
goto done;
}
- info = pirq_info(d, pirq);
- if ( info )
- {
- info->arch.hvm.emuirq = IRQ_UNBOUND;
- pirq_cleanup_check(info, d, pirq);
- }
+ d->arch.pirq_emuirq[pirq] = IRQ_UNBOUND;
if ( emuirq != IRQ_PT )
- radix_tree_delete(&d->arch.hvm_domain.emuirq_pirq, emuirq, NULL);
+ d->arch.emuirq_pirq[emuirq] = IRQ_UNBOUND;
done:
return ret;
}
-bool_t hvm_domain_use_pirq(const struct domain *d, const struct pirq *pirq)
+int hvm_domain_use_pirq(struct domain *d, int pirq)
{
- return is_hvm_domain(d) &&
- pirq->arch.hvm.emuirq != IRQ_UNBOUND &&
- pirq->evtchn != 0;
+ int emuirq;
+
+ if ( !is_hvm_domain(d) )
+ return 0;
+
+ emuirq = domain_pirq_to_emuirq(d, pirq);
+ if ( emuirq != IRQ_UNBOUND && d->pirq_to_evtchn[pirq] != 0 )
+ return 1;
+ else
+ return 0;
}
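
[Note: unmap_domain_pirq() records a forced unbind by storing the negated values in both tables; pirq_guest_unbind() later detects the dead mapping (no IRQ descriptor reachable) and recovers the IRQ from the negative entry. The round trip, in sketch form:]

    /* unmap_domain_pirq(), forced-unbind case: keep the pair, negated. */
    d->arch.pirq_irq[pirq] = -irq;
    d->arch.irq_pirq[irq]  = -pirq;

    /* pirq_guest_unbind(), later: desc lookup fails, so undo it. */
    irq = -domain_pirq_to_irq(d, pirq);   /* positive again */
    BUG_ON(irq <= 0);
    d->arch.pirq_irq[pirq] = d->arch.irq_pirq[irq] = 0;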
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index fe3a39c098..e15dd0a95d 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -258,28 +258,20 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
case PHYSDEVOP_eoi: {
struct physdev_eoi eoi;
- struct pirq *pirq;
-
ret = -EFAULT;
if ( copy_from_guest(&eoi, arg, 1) != 0 )
break;
ret = -EINVAL;
if ( eoi.irq >= v->domain->nr_pirqs )
break;
- spin_lock(&v->domain->event_lock);
- pirq = pirq_info(v->domain, eoi.irq);
- if ( !pirq ) {
- spin_unlock(&v->domain->event_lock);
- break;
- }
if ( !is_hvm_domain(v->domain) &&
v->domain->arch.pv_domain.pirq_eoi_map )
- evtchn_unmask(pirq->evtchn);
+ evtchn_unmask(v->domain->pirq_to_evtchn[eoi.irq]);
if ( !is_hvm_domain(v->domain) ||
- pirq->arch.hvm.emuirq == IRQ_PT )
- pirq_guest_eoi(v->domain, pirq);
- spin_unlock(&v->domain->event_lock);
- ret = 0;
+ domain_pirq_to_emuirq(v->domain, eoi.irq) == IRQ_PT )
+ ret = pirq_guest_eoi(v->domain, eoi.irq);
+ else
+ ret = 0;
break;
}
@@ -572,23 +564,11 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
break;
spin_lock(&d->event_lock);
- ret = get_free_pirq(d, out.type, 0);
- if ( ret >= 0 )
- {
- struct pirq *info = pirq_get_info(d, ret);
-
- if ( info )
- info->arch.irq = PIRQ_ALLOCATED;
- else
- ret = -ENOMEM;
- }
+ out.pirq = get_free_pirq(d, out.type, 0);
+ d->arch.pirq_irq[out.pirq] = PIRQ_ALLOCATED;
spin_unlock(&d->event_lock);
- if ( ret >= 0 )
- {
- out.pirq = ret;
- ret = copy_to_guest(arg, &out, 1) ? -EFAULT : 0;
- }
+ ret = copy_to_guest(arg, &out, 1) ? -EFAULT : 0;
rcu_unlock_domain(d);
break;
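
[Note: PHYSDEVOP_get_free_pirq above uses the get_free_pirq() result as an array index without checking for -ENOSPC. A hardened variant (an illustrative sketch, not what the patch does) would be:]

    ret = get_free_pirq(d, out.type, 0);
    if ( ret >= 0 )
    {
        out.pirq = ret;
        d->arch.pirq_irq[out.pirq] = PIRQ_ALLOCATED;
    }
    spin_unlock(&d->event_lock);
    if ( ret >= 0 )
        ret = copy_to_guest(arg, &out, 1) ? -EFAULT : 0;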
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 40a2833762..852c968bba 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -290,7 +290,13 @@ struct domain *domain_create(
if ( d->nr_pirqs > nr_irqs )
d->nr_pirqs = nr_irqs;
- INIT_RADIX_TREE(&d->pirq_tree, 0);
+ d->pirq_to_evtchn = xmalloc_array(u16, d->nr_pirqs);
+ d->pirq_mask = xmalloc_array(
+ unsigned long, BITS_TO_LONGS(d->nr_pirqs));
+ if ( (d->pirq_to_evtchn == NULL) || (d->pirq_mask == NULL) )
+ goto fail;
+ memset(d->pirq_to_evtchn, 0, d->nr_pirqs * sizeof(*d->pirq_to_evtchn));
+ bitmap_zero(d->pirq_mask, d->nr_pirqs);
if ( evtchn_init(d) != 0 )
goto fail;
@@ -340,7 +346,6 @@ struct domain *domain_create(
{
evtchn_destroy(d);
evtchn_destroy_final(d);
- radix_tree_destroy(&d->pirq_tree, free_pirq_struct, NULL);
}
if ( init_status & INIT_rangeset )
rangeset_domain_destroy(d);
@@ -348,6 +353,8 @@ struct domain *domain_create(
watchdog_domain_destroy(d);
if ( init_status & INIT_xsm )
xsm_free_security_domain(d);
+ xfree(d->pirq_mask);
+ xfree(d->pirq_to_evtchn);
free_cpumask_var(d->domain_dirty_cpumask);
free_domain_struct(d);
return NULL;
@@ -673,7 +680,8 @@ static void complete_domain_destroy(struct rcu_head *head)
evtchn_destroy_final(d);
- radix_tree_destroy(&d->pirq_tree, free_pirq_struct, NULL);
+ xfree(d->pirq_mask);
+ xfree(d->pirq_to_evtchn);
xsm_free_security_domain(d);
free_cpumask_var(d->domain_dirty_cpumask);
@@ -955,20 +963,6 @@ long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
return -ENOSYS;
}
-struct pirq *pirq_get_info(struct domain *d, int pirq)
-{
- struct pirq *info = pirq_info(d, pirq);
-
- if ( !info && (info = alloc_pirq_struct(d)) != NULL &&
- radix_tree_insert(&d->pirq_tree, pirq, info, NULL, NULL) )
- {
- free_pirq_struct(info);
- info = NULL;
- }
-
- return info;
-}
-
struct migrate_info {
long (*func)(void *data);
void *data;
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 56d80b9bed..558fbb1256 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -325,7 +325,6 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
struct evtchn *chn;
struct domain *d = current->domain;
struct vcpu *v = d->vcpu[0];
- struct pirq *info;
int port, pirq = bind->pirq;
long rc;
@@ -337,7 +336,7 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
spin_lock(&d->event_lock);
- if ( pirq_to_evtchn(d, pirq) != 0 )
+ if ( d->pirq_to_evtchn[pirq] != 0 )
ERROR_EXIT(-EEXIST);
if ( (port = get_free_port(d)) < 0 )
@@ -345,18 +344,14 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
chn = evtchn_from_port(d, port);
- info = pirq_get_info(d, pirq);
- if ( !info )
- ERROR_EXIT(-ENOMEM);
- info->evtchn = port;
+ d->pirq_to_evtchn[pirq] = port;
rc = (!is_hvm_domain(d)
- ? pirq_guest_bind(v, pirq, info,
- !!(bind->flags & BIND_PIRQ__WILL_SHARE))
+ ? pirq_guest_bind(
+ v, pirq, !!(bind->flags & BIND_PIRQ__WILL_SHARE))
: 0);
if ( rc != 0 )
{
- info->evtchn = 0;
- pirq_cleanup_check(info, d, pirq);
+ d->pirq_to_evtchn[pirq] = 0;
goto out;
}
@@ -409,18 +404,12 @@ static long __evtchn_close(struct domain *d1, int port1)
case ECS_UNBOUND:
break;
- case ECS_PIRQ: {
- struct pirq *pirq = pirq_info(d1, chn1->u.pirq.irq);
-
- if ( !pirq )
- break;
+ case ECS_PIRQ:
if ( !is_hvm_domain(d1) )
- pirq_guest_unbind(d1, chn1->u.pirq.irq, pirq);
- pirq->evtchn = 0;
- pirq_cleanup_check(pirq, d1, chn1->u.pirq.irq);
+ pirq_guest_unbind(d1, chn1->u.pirq.irq);
+ d1->pirq_to_evtchn[chn1->u.pirq.irq] = 0;
unlink_pirq_port(chn1, d1->vcpu[chn1->notify_vcpu_id]);
break;
- }
case ECS_VIRQ:
for_each_vcpu ( d1, v )
@@ -670,9 +659,9 @@ void send_guest_global_virq(struct domain *d, int virq)
spin_unlock_irqrestore(&v->virq_lock, flags);
}
-int send_guest_pirq(struct domain *d, const struct pirq *pirq)
+int send_guest_pirq(struct domain *d, int pirq)
{
- int port;
+ int port = d->pirq_to_evtchn[pirq];
struct evtchn *chn;
/*
@@ -681,7 +670,7 @@ int send_guest_pirq(struct domain *d, const struct pirq *pirq)
* HVM guests: Port is legitimately zero when the guest disables the
* emulated interrupt/evtchn.
*/
- if ( pirq == NULL || (port = pirq->evtchn) == 0 )
+ if ( port == 0 )
{
BUG_ON(!is_hvm_domain(d));
return 0;
@@ -823,10 +812,13 @@ int evtchn_unmask(unsigned int port)
struct domain *d = current->domain;
struct vcpu *v;
- ASSERT(spin_is_locked(&d->event_lock));
+ spin_lock(&d->event_lock);
if ( unlikely(!port_is_valid(d, port)) )
+ {
+ spin_unlock(&d->event_lock);
return -EINVAL;
+ }
v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];
@@ -842,6 +834,8 @@ int evtchn_unmask(unsigned int port)
vcpu_mark_events_pending(v);
}
+ spin_unlock(&d->event_lock);
+
return 0;
}
@@ -966,9 +960,7 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
struct evtchn_unmask unmask;
if ( copy_from_guest(&unmask, arg, 1) != 0 )
return -EFAULT;
- spin_lock(&current->domain->event_lock);
rc = evtchn_unmask(unmask.port);
- spin_unlock(&current->domain->event_lock);
break;
}
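
[Note: evtchn_unmask() now takes d->event_lock itself, so callers (the hypercall above and __do_pirq_guest_eoi() on ia64 earlier in this patch) no longer wrap it. Likewise send_guest_pirq() works on the bare port number; its core reduces to roughly the following (evtchn_set_pending() is the static helper in this file):]

    int port = d->pirq_to_evtchn[pirq];
    if ( port == 0 )          /* legitimate for a disabled HVM pirq */
        return 0;
    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);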
diff --git a/xen/common/radix-tree.c b/xen/common/radix-tree.c
index f2736d9a00..e6e213c0a0 100644
--- a/xen/common/radix-tree.c
+++ b/xen/common/radix-tree.c
@@ -26,6 +26,7 @@
* o tagging code removed
* o radix_tree_insert has func parameter for dynamic data struct allocation
* o radix_tree_destroy added (including recursive helper function)
+ * o __init functions must be called explicitly
* o other include files adapted to Xen
*/
@@ -34,7 +35,6 @@
#include <xen/lib.h>
#include <xen/types.h>
#include <xen/errno.h>
-#include <xen/xmalloc.h>
#include <xen/radix-tree.h>
#include <asm/cache.h>
@@ -49,18 +49,6 @@ static inline unsigned long radix_tree_maxindex(unsigned int height)
return height_to_maxindex[height];
}
-static struct radix_tree_node *_node_alloc(void *unused)
-{
- struct radix_tree_node *node = xmalloc(struct radix_tree_node);
-
- return node ? memset(node, 0, sizeof(*node)) : node;
-}
-
-static void _node_free(struct radix_tree_node *node)
-{
- xfree(node);
-}
-
/*
* Extend a radix tree so it can store key @index.
*/
@@ -112,9 +100,6 @@ int radix_tree_insert(struct radix_tree_root *root, unsigned long index,
int offset;
int error;
- if (!node_alloc)
- node_alloc = _node_alloc;
-
/* Make sure the tree is high enough. */
if (index > radix_tree_maxindex(root->height)) {
error = radix_tree_extend(root, index, node_alloc, arg);
@@ -225,8 +210,7 @@ EXPORT_SYMBOL(radix_tree_lookup);
static unsigned int
__lookup(struct radix_tree_root *root, void **results, unsigned long index,
- unsigned int max_items, unsigned long *indexes,
- unsigned long *next_index)
+ unsigned int max_items, unsigned long *next_index)
{
unsigned int nr_found = 0;
unsigned int shift, height;
@@ -236,11 +220,8 @@ __lookup(struct radix_tree_root *root, void **results, unsigned long index,
height = root->height;
if (index > radix_tree_maxindex(height))
goto out;
if (height == 0) {
- if (root->rnode && index == 0) {
- if (indexes)
- indexes[nr_found] = index;
+ if (root->rnode && index == 0)
results[nr_found++] = root->rnode;
- }
goto out;
}
@@ -269,8 +250,6 @@ __lookup(struct radix_tree_root *root, void **results, unsigned long index,
for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) {
index++;
if (slot->slots[i]) {
- if (indexes)
- indexes[nr_found] = index - 1;
results[nr_found++] = slot->slots[i];
if (nr_found == max_items)
goto out;
@@ -287,7 +266,6 @@ __lookup(struct radix_tree_root *root, void **results, unsigned long index,
* @results: where the results of the lookup are placed
* @first_index: start the lookup from this key
* @max_items: place up to this many items at *results
- * @indexes: (optional) array to store indexes of items.
*
* Performs an index-ascending scan of the tree for present items. Places
* them at *@results and returns the number of items which were placed at
@@ -297,8 +275,7 @@ __lookup(struct radix_tree_root *root, void **results, unsigned long index,
*/
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
- unsigned long first_index, unsigned int max_items,
- unsigned long *indexes)
+ unsigned long first_index, unsigned int max_items)
{
const unsigned long max_index = radix_tree_maxindex(root->height);
unsigned long cur_index = first_index;
@@ -311,7 +288,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
if (cur_index > max_index)
break;
nr_found = __lookup(root, results + ret, cur_index,
- max_items - ret, indexes + ret, &next_index);
+ max_items - ret, &next_index);
ret += nr_found;
if (next_index == 0)
break;
@@ -359,9 +336,6 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index,
unsigned int height, shift;
int offset;
- if (!node_free)
- node_free = _node_free;
-
height = root->height;
if (index > radix_tree_maxindex(height))
goto out;
@@ -446,8 +420,6 @@ void radix_tree_destroy(struct radix_tree_root *root,
if (root->height == 0)
slot_free(root->rnode);
else {
- if (!node_free)
- node_free = _node_free;
radix_tree_node_destroy(root->rnode, root->height,
slot_free, node_free);
node_free(root->rnode);
@@ -468,14 +440,10 @@ static unsigned long __init __maxindex(unsigned int height)
return index;
}
-static int __init radix_tree_init(void)
+void __init radix_tree_init(void)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++)
height_to_maxindex[i] = __maxindex(i);
-
- return 0;
}
-/* pre-SMP just so it runs before 'normal' initcalls */
-presmp_initcall(radix_tree_init);
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index c461e99290..1c155db690 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -2925,6 +2925,7 @@ static int __init init_tmem(void)
if ( !tmh_enabled() )
return 0;
+ radix_tree_init();
if ( tmh_dedup_enabled() )
for (i = 0; i < 256; i++ )
{
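
[Note: with the pre-SMP initcall gone, each radix-tree user must call radix_tree_init() once before the first lookup or insert, and, because the default _node_alloc()/_node_free() were removed, must pass its own allocator pair. A usage sketch under those assumptions (my_node_alloc is hypothetical):]

    static struct radix_tree_node *my_node_alloc(void *arg)
    {
        struct radix_tree_node *n = xmalloc(struct radix_tree_node);
        return n ? memset(n, 0, sizeof(*n)) : NULL;
    }

    radix_tree_init();                 /* fills height_to_maxindex[] */
    INIT_RADIX_TREE(&tree, 0);
    rc = radix_tree_insert(&tree, index, item, my_node_alloc, NULL);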
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index 55e608e679..67b022325c 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -35,28 +35,18 @@ bool_t pt_irq_need_timer(uint32_t flags)
return !(flags & (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_TRANSLATE));
}
-static int pt_irq_guest_eoi(struct domain *d, unsigned int pirq,
- struct hvm_pirq_dpci *pirq_dpci, void *arg)
-{
- if ( __test_and_clear_bit(_HVM_IRQ_DPCI_EOI_LATCH_SHIFT,
- &pirq_dpci->flags) )
- {
- pirq_dpci->masked = 0;
- pirq_dpci->pending = 0;
- pirq_guest_eoi(d, dpci_pirq(pirq_dpci));
- }
-
- return 0;
-}
-
static void pt_irq_time_out(void *data)
{
- struct hvm_pirq_dpci *irq_map = data;
- unsigned int guest_gsi;
+ struct hvm_mirq_dpci_mapping *irq_map = data;
+ unsigned int guest_gsi, machine_gsi = 0;
struct hvm_irq_dpci *dpci = NULL;
struct dev_intx_gsi_link *digl;
struct hvm_girq_dpci_mapping *girq;
uint32_t device, intx;
+ unsigned int nr_pirqs = irq_map->dom->nr_pirqs;
+ DECLARE_BITMAP(machine_gsi_map, nr_pirqs);
+
+ bitmap_zero(machine_gsi_map, nr_pirqs);
spin_lock(&irq_map->dom->event_lock);
@@ -67,18 +57,32 @@ static void pt_irq_time_out(void *data)
guest_gsi = digl->gsi;
list_for_each_entry ( girq, &dpci->girq[guest_gsi], list )
{
- struct pirq *pirq = pirq_info(irq_map->dom, girq->machine_gsi);
-
- pirq_dpci(pirq)->flags |= HVM_IRQ_DPCI_EOI_LATCH;
+ machine_gsi = girq->machine_gsi;
+ set_bit(machine_gsi, machine_gsi_map);
}
device = digl->device;
intx = digl->intx;
hvm_pci_intx_deassert(irq_map->dom, device, intx);
}
- pt_pirq_iterate(irq_map->dom, pt_irq_guest_eoi, NULL);
+ for ( machine_gsi = find_first_bit(machine_gsi_map, nr_pirqs);
+ machine_gsi < nr_pirqs;
+ machine_gsi = find_next_bit(machine_gsi_map, nr_pirqs,
+ machine_gsi + 1) )
+ {
+ clear_bit(machine_gsi, dpci->dirq_mask);
+ dpci->mirq[machine_gsi].pending = 0;
+ }
spin_unlock(&irq_map->dom->event_lock);
+
+ for ( machine_gsi = find_first_bit(machine_gsi_map, nr_pirqs);
+ machine_gsi < nr_pirqs;
+ machine_gsi = find_next_bit(machine_gsi_map, nr_pirqs,
+ machine_gsi + 1) )
+ {
+ pirq_guest_eoi(irq_map->dom, machine_gsi);
+ }
}
struct hvm_irq_dpci *domain_get_irq_dpci(const struct domain *d)
@@ -91,6 +95,10 @@ struct hvm_irq_dpci *domain_get_irq_dpci(const struct domain *d)
void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci)
{
+ xfree(dpci->mirq);
+ xfree(dpci->dirq_mask);
+ xfree(dpci->mapping);
+ xfree(dpci->hvm_timer);
xfree(dpci);
}
@@ -98,9 +106,7 @@ int pt_irq_create_bind_vtd(
struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
struct hvm_irq_dpci *hvm_irq_dpci = NULL;
- struct hvm_pirq_dpci *pirq_dpci;
- struct pirq *info;
- uint32_t guest_gsi;
+ uint32_t machine_gsi, guest_gsi;
uint32_t device, intx, link;
struct dev_intx_gsi_link *digl;
struct hvm_girq_dpci_mapping *girq;
@@ -123,45 +129,63 @@ int pt_irq_create_bind_vtd(
memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
tasklet_init(&hvm_irq_dpci->dirq_tasklet,
hvm_dirq_assist, (unsigned long)d);
+ hvm_irq_dpci->mirq = xmalloc_array(struct hvm_mirq_dpci_mapping,
+ d->nr_pirqs);
+ hvm_irq_dpci->dirq_mask = xmalloc_array(unsigned long,
+ BITS_TO_LONGS(d->nr_pirqs));
+ hvm_irq_dpci->mapping = xmalloc_array(unsigned long,
+ BITS_TO_LONGS(d->nr_pirqs));
+ hvm_irq_dpci->hvm_timer = xmalloc_array(struct timer, d->nr_pirqs);
+ if ( !hvm_irq_dpci->mirq ||
+ !hvm_irq_dpci->dirq_mask ||
+ !hvm_irq_dpci->mapping ||
+ !hvm_irq_dpci->hvm_timer)
+ {
+ spin_unlock(&d->event_lock);
+ free_hvm_irq_dpci(hvm_irq_dpci);
+ return -ENOMEM;
+ }
+ memset(hvm_irq_dpci->mirq, 0,
+ d->nr_pirqs * sizeof(*hvm_irq_dpci->mirq));
+ bitmap_zero(hvm_irq_dpci->dirq_mask, d->nr_pirqs);
+ bitmap_zero(hvm_irq_dpci->mapping, d->nr_pirqs);
+ memset(hvm_irq_dpci->hvm_timer, 0,
+ d->nr_pirqs * sizeof(*hvm_irq_dpci->hvm_timer));
+ for ( int i = 0; i < d->nr_pirqs; i++ ) {
+ INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
+ hvm_irq_dpci->mirq[i].gmsi.dest_vcpu_id = -1;
+ }
for ( int i = 0; i < NR_HVM_IRQS; i++ )
INIT_LIST_HEAD(&hvm_irq_dpci->girq[i]);
d->arch.hvm_domain.irq.dpci = hvm_irq_dpci;
}
- info = pirq_get_info(d, pirq);
- if ( !info )
- {
- spin_unlock(&d->event_lock);
- return -ENOMEM;
- }
- pirq_dpci = pirq_dpci(info);
-
if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI )
{
uint8_t dest, dest_mode;
int dest_vcpu_id;
- if ( !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
+ if ( !test_and_set_bit(pirq, hvm_irq_dpci->mapping))
{
- pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED | HVM_IRQ_DPCI_MACH_MSI |
- HVM_IRQ_DPCI_GUEST_MSI;
- pirq_dpci->gmsi.gvec = pt_irq_bind->u.msi.gvec;
- pirq_dpci->gmsi.gflags = pt_irq_bind->u.msi.gflags;
+ hvm_irq_dpci->mirq[pirq].flags = HVM_IRQ_DPCI_MACH_MSI |
+ HVM_IRQ_DPCI_GUEST_MSI;
+ hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
+ hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
/* bind after hvm_irq_dpci is setup to avoid race with irq handler */
- rc = pirq_guest_bind(d->vcpu[0], pirq, info, 0);
+ rc = pirq_guest_bind(d->vcpu[0], pirq, 0);
if ( rc == 0 && pt_irq_bind->u.msi.gtable )
{
- rc = msixtbl_pt_register(d, info, pt_irq_bind->u.msi.gtable);
+ rc = msixtbl_pt_register(d, pirq, pt_irq_bind->u.msi.gtable);
if ( unlikely(rc) )
- pirq_guest_unbind(d, pirq, info);
+ pirq_guest_unbind(d, pirq);
}
if ( unlikely(rc) )
{
- pirq_dpci->gmsi.gflags = 0;
- pirq_dpci->gmsi.gvec = 0;
- pirq_dpci->flags = 0;
- pirq_cleanup_check(info, d, pirq);
+ hvm_irq_dpci->mirq[pirq].gmsi.gflags = 0;
+ hvm_irq_dpci->mirq[pirq].gmsi.gvec = 0;
+ hvm_irq_dpci->mirq[pirq].flags = 0;
+ clear_bit(pirq, hvm_irq_dpci->mapping);
spin_unlock(&d->event_lock);
return rc;
}
@@ -170,33 +194,34 @@ int pt_irq_create_bind_vtd(
{
uint32_t mask = HVM_IRQ_DPCI_MACH_MSI | HVM_IRQ_DPCI_GUEST_MSI;
- if ( (pirq_dpci->flags & mask) != mask)
+ if ( (hvm_irq_dpci->mirq[pirq].flags & mask) != mask)
{
spin_unlock(&d->event_lock);
return -EBUSY;
}
/* if pirq is already mapped as vmsi, update the guest data/addr */
- if ( pirq_dpci->gmsi.gvec != pt_irq_bind->u.msi.gvec ||
- pirq_dpci->gmsi.gflags != pt_irq_bind->u.msi.gflags) {
+ if ( hvm_irq_dpci->mirq[pirq].gmsi.gvec != pt_irq_bind->u.msi.gvec ||
+ hvm_irq_dpci->mirq[pirq].gmsi.gflags != pt_irq_bind->u.msi.gflags) {
/* Directly clear pending EOIs before enabling new MSI info. */
- pirq_guest_eoi(d, info);
+ pirq_guest_eoi(d, pirq);
- pirq_dpci->gmsi.gvec = pt_irq_bind->u.msi.gvec;
- pirq_dpci->gmsi.gflags = pt_irq_bind->u.msi.gflags;
+ hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
+ hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
}
}
/* Calculate dest_vcpu_id for MSI-type pirq migration */
- dest = pirq_dpci->gmsi.gflags & VMSI_DEST_ID_MASK;
- dest_mode = !!(pirq_dpci->gmsi.gflags & VMSI_DM_MASK);
+ dest = hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DEST_ID_MASK;
+ dest_mode = !!(hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DM_MASK);
dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode);
- pirq_dpci->gmsi.dest_vcpu_id = dest_vcpu_id;
+ hvm_irq_dpci->mirq[pirq].gmsi.dest_vcpu_id = dest_vcpu_id;
spin_unlock(&d->event_lock);
if ( dest_vcpu_id >= 0 )
hvm_migrate_pirqs(d->vcpu[dest_vcpu_id]);
}
else
{
+ machine_gsi = pt_irq_bind->machine_irq;
device = pt_irq_bind->u.pci.device;
intx = pt_irq_bind->u.pci.intx;
guest_gsi = hvm_pci_intx_gsi(device, intx);
@@ -222,51 +247,50 @@ int pt_irq_create_bind_vtd(
digl->intx = intx;
digl->gsi = guest_gsi;
digl->link = link;
- list_add_tail(&digl->list, &pirq_dpci->digl_list);
+ list_add_tail(&digl->list,
+ &hvm_irq_dpci->mirq[machine_gsi].digl_list);
girq->device = device;
girq->intx = intx;
- girq->machine_gsi = pirq;
+ girq->machine_gsi = machine_gsi;
list_add_tail(&girq->list, &hvm_irq_dpci->girq[guest_gsi]);
/* Bind the same mirq once in the same domain */
- if ( !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
+ if ( !test_and_set_bit(machine_gsi, hvm_irq_dpci->mapping))
{
unsigned int share;
- pirq_dpci->dom = d;
+ hvm_irq_dpci->mirq[machine_gsi].dom = d;
if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI_TRANSLATE )
{
- pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED |
- HVM_IRQ_DPCI_MACH_MSI |
- HVM_IRQ_DPCI_GUEST_PCI |
- HVM_IRQ_DPCI_TRANSLATE;
+ hvm_irq_dpci->mirq[machine_gsi].flags = HVM_IRQ_DPCI_MACH_MSI |
+ HVM_IRQ_DPCI_GUEST_PCI |
+ HVM_IRQ_DPCI_TRANSLATE;
share = 0;
}
else /* PT_IRQ_TYPE_PCI */
{
- pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED |
- HVM_IRQ_DPCI_MACH_PCI |
- HVM_IRQ_DPCI_GUEST_PCI;
+ hvm_irq_dpci->mirq[machine_gsi].flags = HVM_IRQ_DPCI_MACH_PCI |
+ HVM_IRQ_DPCI_GUEST_PCI;
share = BIND_PIRQ__WILL_SHARE;
}
/* Init timer before binding */
- if ( pt_irq_need_timer(pirq_dpci->flags) )
- init_timer(&pirq_dpci->timer, pt_irq_time_out, pirq_dpci, 0);
+ if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
+ init_timer(&hvm_irq_dpci->hvm_timer[machine_gsi],
+ pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
/* Deal with gsi for legacy devices */
- rc = pirq_guest_bind(d->vcpu[0], pirq, info, share);
+ rc = pirq_guest_bind(d->vcpu[0], machine_gsi, share);
if ( unlikely(rc) )
{
- if ( pt_irq_need_timer(pirq_dpci->flags) )
- kill_timer(&pirq_dpci->timer);
- pirq_dpci->dom = NULL;
+ if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
+ kill_timer(&hvm_irq_dpci->hvm_timer[machine_gsi]);
+ hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
+ clear_bit(machine_gsi, hvm_irq_dpci->mapping);
list_del(&girq->list);
xfree(girq);
list_del(&digl->list);
hvm_irq_dpci->link_cnt[link]--;
- pirq_dpci->flags = 0;
- pirq_cleanup_check(info, d, pirq);
spin_unlock(&d->event_lock);
xfree(digl);
return rc;
@@ -278,7 +302,7 @@ int pt_irq_create_bind_vtd(
if ( iommu_verbose )
dprintk(VTDPREFIX,
"d%d: bind: m_gsi=%u g_gsi=%u device=%u intx=%u\n",
- d->domain_id, pirq, guest_gsi, device, intx);
+ d->domain_id, machine_gsi, guest_gsi, device, intx);
}
return 0;
}
@@ -287,12 +311,11 @@ int pt_irq_destroy_bind_vtd(
struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
struct hvm_irq_dpci *hvm_irq_dpci = NULL;
- struct hvm_pirq_dpci *pirq_dpci;
uint32_t machine_gsi, guest_gsi;
uint32_t device, intx, link;
- struct dev_intx_gsi_link *digl, *tmp;
+ struct list_head *digl_list, *tmp;
+ struct dev_intx_gsi_link *digl;
struct hvm_girq_dpci_mapping *girq;
- struct pirq *pirq;
machine_gsi = pt_irq_bind->machine_irq;
device = pt_irq_bind->u.pci.device;
@@ -327,14 +350,14 @@ int pt_irq_destroy_bind_vtd(
}
}
- pirq = pirq_info(d, machine_gsi);
- pirq_dpci = pirq_dpci(pirq);
-
/* clear the mirq info */
- if ( pirq_dpci && (pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
+ if ( test_bit(machine_gsi, hvm_irq_dpci->mapping))
{
- list_for_each_entry_safe ( digl, tmp, &pirq_dpci->digl_list, list )
+ list_for_each_safe ( digl_list, tmp,
+ &hvm_irq_dpci->mirq[machine_gsi].digl_list )
{
+ digl = list_entry(digl_list,
+ struct dev_intx_gsi_link, list);
if ( digl->device == device &&
digl->intx == intx &&
digl->link == link &&
@@ -345,15 +368,15 @@ int pt_irq_destroy_bind_vtd(
}
}
- if ( list_empty(&pirq_dpci->digl_list) )
+ if ( list_empty(&hvm_irq_dpci->mirq[machine_gsi].digl_list) )
{
- pirq_guest_unbind(d, machine_gsi, pirq);
- msixtbl_pt_unregister(d, pirq);
- if ( pt_irq_need_timer(pirq_dpci->flags) )
- kill_timer(&pirq_dpci->timer);
- pirq_dpci->dom = NULL;
- pirq_dpci->flags = 0;
- pirq_cleanup_check(pirq, d, machine_gsi);
+ pirq_guest_unbind(d, machine_gsi);
+ msixtbl_pt_unregister(d, machine_gsi);
+ if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
+ kill_timer(&hvm_irq_dpci->hvm_timer[machine_gsi]);
+ hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
+ hvm_irq_dpci->mirq[machine_gsi].flags = 0;
+ clear_bit(machine_gsi, hvm_irq_dpci->mapping);
}
}
spin_unlock(&d->event_lock);
@@ -366,156 +389,120 @@ int pt_irq_destroy_bind_vtd(
return 0;
}
-void pt_pirq_init(struct domain *d, struct hvm_pirq_dpci *dpci)
-{
- INIT_LIST_HEAD(&dpci->digl_list);
- dpci->gmsi.dest_vcpu_id = -1;
-}
-
-bool_t pt_pirq_cleanup_check(struct hvm_pirq_dpci *dpci)
-{
- return !dpci->flags;
-}
-
-int pt_pirq_iterate(struct domain *d,
- int (*cb)(struct domain *, unsigned int,
- struct hvm_pirq_dpci *, void *),
- void *arg)
-{
- int rc = 0;
- unsigned int pirq = 0, n, i;
- unsigned long indexes[8];
- struct pirq *pirqs[ARRAY_SIZE(indexes)];
-
- ASSERT(spin_is_locked(&d->event_lock));
-
- do {
- n = radix_tree_gang_lookup(&d->pirq_tree, (void **)pirqs, pirq,
- ARRAY_SIZE(pirqs), indexes);
- for ( i = 0; i < n; ++i )
- {
- struct hvm_pirq_dpci *pirq_dpci = pirq_dpci(pirqs[i]);
-
- pirq = indexes[i];
- if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
- rc = cb(d, pirq, pirq_dpci, arg);
- }
- } while ( !rc && ++pirq < d->nr_pirqs && n == ARRAY_SIZE(pirqs) );
-
- return rc;
-}
-
-int hvm_do_IRQ_dpci(struct domain *d, struct pirq *pirq)
+int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
{
struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
- struct hvm_pirq_dpci *pirq_dpci = pirq_dpci(pirq);
- if ( !iommu_enabled || !dpci || !pirq_dpci ||
- !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
+ ASSERT(spin_is_locked(&irq_desc[domain_pirq_to_irq(d, mirq)].lock));
+ if ( !iommu_enabled || !dpci || !test_bit(mirq, dpci->mapping))
return 0;
- pirq_dpci->masked = 1;
+ set_bit(mirq, dpci->dirq_mask);
tasklet_schedule(&dpci->dirq_tasklet);
return 1;
}
#ifdef SUPPORT_MSI_REMAPPING
/* called with d->event_lock held */
-static void __msi_pirq_eoi(struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
+static void __msi_pirq_eoi(struct domain *d, int pirq)
{
+ struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
irq_desc_t *desc;
- if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) &&
- (pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI) )
+ if ( ( pirq >= 0 ) && ( pirq < d->nr_pirqs ) &&
+ test_bit(pirq, hvm_irq_dpci->mapping) &&
+ ( hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MACH_MSI) )
{
- struct pirq *pirq = dpci_pirq(pirq_dpci);
-
BUG_ON(!local_irq_is_enabled());
- desc = pirq_spin_lock_irq_desc(d, pirq, NULL);
+ desc = domain_spin_lock_irq_desc(d, pirq, NULL);
if ( !desc )
return;
desc->status &= ~IRQ_INPROGRESS;
- desc_guest_eoi(d, desc, pirq);
- }
-}
-
-static int _hvm_dpci_msi_eoi(struct domain *d, unsigned int pirq,
- struct hvm_pirq_dpci *pirq_dpci, void *arg)
-{
- int vector = (long)arg;
-
- if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI) &&
- (pirq_dpci->gmsi.gvec == vector) )
- {
- int dest = pirq_dpci->gmsi.gflags & VMSI_DEST_ID_MASK;
- int dest_mode = !!(pirq_dpci->gmsi.gflags & VMSI_DM_MASK);
+ spin_unlock_irq(&desc->lock);
- if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest,
- dest_mode) )
- {
- __msi_pirq_eoi(d, pirq_dpci);
- return 1;
- }
+ pirq_guest_eoi(d, pirq);
}
-
- return 0;
}
void hvm_dpci_msi_eoi(struct domain *d, int vector)
{
- if ( !iommu_enabled || !d->arch.hvm_domain.irq.dpci )
+ int pirq, dest, dest_mode;
+ struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+
+ if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
return;
spin_lock(&d->event_lock);
- pt_pirq_iterate(d, _hvm_dpci_msi_eoi, (void *)(long)vector);
+ for ( pirq = find_first_bit(hvm_irq_dpci->mapping, d->nr_pirqs);
+ pirq < d->nr_pirqs;
+ pirq = find_next_bit(hvm_irq_dpci->mapping, d->nr_pirqs, pirq + 1) )
+ {
+ if ( (!(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MACH_MSI)) ||
+ (hvm_irq_dpci->mirq[pirq].gmsi.gvec != vector) )
+ continue;
+
+ dest = hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DEST_ID_MASK;
+ dest_mode = !!(hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DM_MASK);
+ if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest, dest_mode) )
+ break;
+ }
+ if ( pirq < d->nr_pirqs )
+ __msi_pirq_eoi(d, pirq);
spin_unlock(&d->event_lock);
}
-static int hvm_pci_msi_assert(struct domain *d,
- struct hvm_pirq_dpci *pirq_dpci)
+extern int vmsi_deliver(struct domain *d, int pirq);
+static int hvm_pci_msi_assert(struct domain *d, int pirq)
{
- struct pirq *pirq = dpci_pirq(pirq_dpci);
-
if ( hvm_domain_use_pirq(d, pirq) )
return send_guest_pirq(d, pirq);
else
- return vmsi_deliver(d, pirq_dpci);
+ return vmsi_deliver(d, pirq);
}
#endif
-static int _hvm_dirq_assist(struct domain *d, unsigned int pirq,
- struct hvm_pirq_dpci *pirq_dpci, void *arg)
+static void hvm_dirq_assist(unsigned long _d)
{
+ unsigned int pirq;
uint32_t device, intx;
+ struct domain *d = (struct domain *)_d;
+ struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
struct dev_intx_gsi_link *digl;
- if ( test_and_clear_bool(pirq_dpci->masked) )
+ ASSERT(hvm_irq_dpci);
+
+ for ( pirq = find_first_bit(hvm_irq_dpci->dirq_mask, d->nr_pirqs);
+ pirq < d->nr_pirqs;
+ pirq = find_next_bit(hvm_irq_dpci->dirq_mask, d->nr_pirqs, pirq + 1) )
{
+ if ( !test_and_clear_bit(pirq, hvm_irq_dpci->dirq_mask) )
+ continue;
+
+ spin_lock(&d->event_lock);
#ifdef SUPPORT_MSI_REMAPPING
- if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI )
+ if ( hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_GUEST_MSI )
{
- hvm_pci_msi_assert(d, pirq_dpci);
- return 0;
+ hvm_pci_msi_assert(d, pirq);
+ spin_unlock(&d->event_lock);
+ continue;
}
#endif
- list_for_each_entry ( digl, &pirq_dpci->digl_list, list )
+ list_for_each_entry ( digl, &hvm_irq_dpci->mirq[pirq].digl_list, list )
{
- struct pirq *info = dpci_pirq(pirq_dpci);
-
device = digl->device;
intx = digl->intx;
- if ( hvm_domain_use_pirq(d, info) )
- send_guest_pirq(d, info);
+ if ( hvm_domain_use_pirq(d, pirq) )
+ send_guest_pirq(d, pirq);
else
hvm_pci_intx_assert(d, device, intx);
- pirq_dpci->pending++;
+ hvm_irq_dpci->mirq[pirq].pending++;
#ifdef SUPPORT_MSI_REMAPPING
- if ( pirq_dpci->flags & HVM_IRQ_DPCI_TRANSLATE )
+ if ( hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_TRANSLATE )
{
/* for translated MSI to INTx interrupt, eoi as early as possible */
- __msi_pirq_eoi(d, pirq_dpci);
+ __msi_pirq_eoi(d, pirq);
}
#endif
}
@@ -527,50 +514,37 @@ static int _hvm_dirq_assist(struct domain *d, unsigned int pirq,
* guest will never deal with the irq, then the physical interrupt line
* will never be deasserted.
*/
- if ( pt_irq_need_timer(pirq_dpci->flags) )
- set_timer(&pirq_dpci->timer, NOW() + PT_IRQ_TIME_OUT);
+ if ( pt_irq_need_timer(hvm_irq_dpci->mirq[pirq].flags) )
+ set_timer(&hvm_irq_dpci->hvm_timer[pirq],
+ NOW() + PT_IRQ_TIME_OUT);
+ spin_unlock(&d->event_lock);
}
-
- return 0;
-}
-
-static void hvm_dirq_assist(unsigned long _d)
-{
- struct domain *d = (struct domain *)_d;
-
- ASSERT(d->arch.hvm_domain.irq.dpci);
-
- spin_lock(&d->event_lock);
- pt_pirq_iterate(d, _hvm_dirq_assist, NULL);
- spin_unlock(&d->event_lock);
}
static void __hvm_dpci_eoi(struct domain *d,
+ struct hvm_irq_dpci *hvm_irq_dpci,
struct hvm_girq_dpci_mapping *girq,
union vioapic_redir_entry *ent)
{
- uint32_t device, intx;
- struct pirq *pirq;
- struct hvm_pirq_dpci *pirq_dpci;
+ uint32_t device, intx, machine_gsi;
device = girq->device;
intx = girq->intx;
hvm_pci_intx_deassert(d, device, intx);
- pirq = pirq_info(d, girq->machine_gsi);
- pirq_dpci = pirq_dpci(pirq);
+ machine_gsi = girq->machine_gsi;
/*
* No need to take the vector lock for the timer
* since the interrupt has not been EOIed yet
*/
- if ( --pirq_dpci->pending ||
+ if ( --hvm_irq_dpci->mirq[machine_gsi].pending ||
( ent && ent->fields.mask ) ||
- ! pt_irq_need_timer(pirq_dpci->flags) )
+ ! pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
return;
- stop_timer(&pirq_dpci->timer);
- pirq_guest_eoi(d, pirq);
+ stop_timer(&hvm_irq_dpci->hvm_timer[machine_gsi]);
+ pirq_guest_eoi(d, machine_gsi);
}
void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
@@ -595,7 +569,7 @@ void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
goto unlock;
list_for_each_entry ( girq, &hvm_irq_dpci->girq[guest_gsi], list )
- __hvm_dpci_eoi(d, girq, ent);
+ __hvm_dpci_eoi(d, hvm_irq_dpci, girq, ent);
unlock:
spin_unlock(&d->event_lock);
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 2e398659c6..e30cd950a3 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -236,28 +236,12 @@ out:
return ret;
}
-static int pci_clean_dpci_irq(struct domain *d, unsigned int pirq,
- struct hvm_pirq_dpci *pirq_dpci, void *arg)
-{
- struct dev_intx_gsi_link *digl, *tmp;
-
- pirq_guest_unbind(d, pirq, dpci_pirq(pirq_dpci));
-
- if ( pt_irq_need_timer(pirq_dpci->flags) )
- kill_timer(&pirq_dpci->timer);
-
- list_for_each_entry_safe ( digl, tmp, &pirq_dpci->digl_list, list )
- {
- list_del(&digl->list);
- xfree(digl);
- }
-
- return 0;
-}
-
static void pci_clean_dpci_irqs(struct domain *d)
{
struct hvm_irq_dpci *hvm_irq_dpci = NULL;
+ uint32_t i;
+ struct list_head *digl_list, *tmp;
+ struct dev_intx_gsi_link *digl;
if ( !iommu_enabled )
return;
@@ -271,7 +255,24 @@ static void pci_clean_dpci_irqs(struct domain *d)
{
tasklet_kill(&hvm_irq_dpci->dirq_tasklet);
- pt_pirq_iterate(d, pci_clean_dpci_irq, NULL);
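+ /* Unbind every mapped machine irq, kill its timer, and free its device/intx/gsi links. */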
+ for ( i = find_first_bit(hvm_irq_dpci->mapping, d->nr_pirqs);
+ i < d->nr_pirqs;
+ i = find_next_bit(hvm_irq_dpci->mapping, d->nr_pirqs, i + 1) )
+ {
+ pirq_guest_unbind(d, i);
+
+ if ( pt_irq_need_timer(hvm_irq_dpci->mirq[i].flags) )
+ kill_timer(&hvm_irq_dpci->hvm_timer[i]);
+
+ list_for_each_safe ( digl_list, tmp,
+ &hvm_irq_dpci->mirq[i].digl_list )
+ {
+ digl = list_entry(digl_list,
+ struct dev_intx_gsi_link, list);
+ list_del(&digl->list);
+ xfree(digl);
+ }
+ }
d->arch.hvm_domain.irq.dpci = NULL;
free_hvm_irq_dpci(hvm_irq_dpci);
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index c3a974a49d..6a33d7e557 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -68,32 +68,12 @@ void *__init map_to_nocache_virt(int nr_iommus, u64 maddr)
return (void *)fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus);
}
-static int _hvm_dpci_isairq_eoi(struct domain *d, unsigned int pirq,
- struct hvm_pirq_dpci *pirq_dpci, void *arg)
-{
- struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
- unsigned int isairq = (long)arg;
- struct dev_intx_gsi_link *digl, *tmp;
-
- list_for_each_entry_safe ( digl, tmp, &pirq_dpci->digl_list, list )
- {
- if ( hvm_irq->pci_link.route[digl->link] == isairq )
- {
- hvm_pci_intx_deassert(d, digl->device, digl->intx);
- if ( --pirq_dpci->pending == 0 )
- {
- stop_timer(&pirq_dpci->timer);
- pirq_guest_eoi(d, dpci_pirq(pirq_dpci));
- }
- }
- }
-
- return 0;
-}
-
void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
{
+ struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
struct hvm_irq_dpci *dpci = NULL;
+ struct dev_intx_gsi_link *digl, *tmp;
+ int i;
ASSERT(isairq < NR_ISAIRQS);
if ( !iommu_enabled)
@@ -103,10 +83,29 @@ void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
dpci = domain_get_irq_dpci(d);
- if ( dpci && test_bit(isairq, dpci->isairq_map) )
+ if ( !dpci || !test_bit(isairq, dpci->isairq_map) )
{
- /* Multiple mirq may be mapped to one isa irq */
- pt_pirq_iterate(d, _hvm_dpci_isairq_eoi, (void *)(long)isairq);
+ spin_unlock(&d->event_lock);
+ return;
+ }
+ /* Multiple mirqs may be mapped to one ISA irq */
+ for ( i = find_first_bit(dpci->mapping, d->nr_pirqs);
+ i < d->nr_pirqs;
+ i = find_next_bit(dpci->mapping, d->nr_pirqs, i + 1) )
+ {
+ list_for_each_entry_safe ( digl, tmp,
+ &dpci->mirq[i].digl_list, list )
+ {
+ if ( hvm_irq->pci_link.route[digl->link] == isairq )
+ {
+ hvm_pci_intx_deassert(d, digl->device, digl->intx);
+ if ( --dpci->mirq[i].pending == 0 )
+ {
+ stop_timer(&dpci->hvm_timer[i]);
+ pirq_guest_eoi(d, i);
+ }
+ }
+ }
}
spin_unlock(&d->event_lock);
}
diff --git a/xen/include/asm-ia64/domain.h b/xen/include/asm-ia64/domain.h
index a1dae862bc..e5b71e0011 100644
--- a/xen/include/asm-ia64/domain.h
+++ b/xen/include/asm-ia64/domain.h
@@ -11,7 +11,6 @@
#include <xen/list.h>
#include <xen/cpumask.h>
#include <xen/mm.h>
-#include <xen/hvm/irq.h>
#include <asm/fpswa.h>
#include <xen/rangeset.h>
@@ -317,23 +316,6 @@ struct arch_vcpu {
cpumask_t cache_coherent_map;
};
-struct arch_pirq {
- struct hvm_pirq_dpci dpci;
-};
-
-#define pirq_dpci(pirq) ((pirq) ? &(pirq)->arch.dpci : NULL)
-#define dpci_pirq(dpci) container_of(dpci, struct pirq, arch.dpci)
-
-#define alloc_pirq_struct(d) ({ \
- struct pirq *pirq = xmalloc(struct pirq); \
- if ( pirq ) \
- { \
- memset(pirq, 0, sizeof(*pirq)); \
- pt_pirq_init(d, &pirq->arch.dpci); \
- } \
- pirq; \
-})
-
#include <asm/uaccess.h> /* for KERNEL_DS */
#include <asm/pgtable.h>
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index a23542ad00..244338ac6b 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -3,7 +3,6 @@
#include <xen/config.h>
#include <xen/mm.h>
-#include <xen/radix-tree.h>
#include <asm/hvm/vcpu.h>
#include <asm/hvm/domain.h>
#include <asm/e820.h>
@@ -285,7 +284,11 @@ struct arch_domain
const char *nested_p2m_function;
/* NB. protected by d->event_lock and by irq_desc[irq].lock */
- struct radix_tree_root irq_pirq;
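+ /* physical irq to pirq and vice versa */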
+ int *irq_pirq;
+ int *pirq_irq;
+ /* pirq to emulated irq and vice versa */
+ int *emuirq_pirq;
+ int *pirq_emuirq;
/* Maximum physical-address bitwidth supported by this guest. */
unsigned int physaddr_bitsize;
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 27b3de50dc..ab24a09f25 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -59,9 +59,6 @@ struct hvm_domain {
/* VCPU which is current target for 8259 interrupts. */
struct vcpu *i8259_target;
- /* emulated irq to pirq */
- struct radix_tree_root emuirq_pirq;
-
/* hvm_print_line() logging. */
#define HVM_PBUF_SIZE 80
char *pbuf;
diff --git a/xen/include/asm-x86/hvm/irq.h b/xen/include/asm-x86/hvm/irq.h
index b57d4670e2..06e9884db4 100644
--- a/xen/include/asm-x86/hvm/irq.h
+++ b/xen/include/asm-x86/hvm/irq.h
@@ -111,6 +111,4 @@ struct hvm_intack hvm_vcpu_ack_pending_irq(struct vcpu *v,
*/
#define SUPPORT_MSI_REMAPPING 1
-void msixtbl_pt_cleanup(struct domain *d);
-
#endif /* __ASM_X86_HVM_IRQ_H__ */
diff --git a/xen/include/asm-x86/irq.h b/xen/include/asm-x86/irq.h
index 12512e3152..b216f16c7a 100644
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -7,7 +7,6 @@
#include <asm/atomic.h>
#include <xen/cpumask.h>
#include <xen/smp.h>
-#include <xen/hvm/irq.h>
#include <irq_vectors.h>
#include <asm/percpu.h>
@@ -106,20 +105,6 @@ extern unsigned int io_apic_irqs;
DECLARE_PER_CPU(unsigned int, irq_count);
-struct pirq;
-struct arch_pirq {
- int irq;
- union {
- struct hvm_pirq {
- int emuirq;
- struct hvm_pirq_dpci dpci;
- } hvm;
- };
-};
-
-#define pirq_dpci(pirq) ((pirq) ? &(pirq)->arch.hvm.dpci : NULL)
-#define dpci_pirq(pd) container_of(pd, struct pirq, arch.hvm.dpci)
-
int pirq_shared(struct domain *d , int irq);
int map_domain_pirq(struct domain *d, int pirq, int irq, int type,
@@ -129,7 +114,7 @@ int get_free_pirq(struct domain *d, int type, int index);
void free_domain_pirqs(struct domain *d);
int map_domain_emuirq_pirq(struct domain *d, int pirq, int irq);
int unmap_domain_pirq_emuirq(struct domain *d, int pirq);
-bool_t hvm_domain_use_pirq(const struct domain *, const struct pirq *);
+int hvm_domain_use_pirq(struct domain *d, int irq);
int init_irq_data(void);
@@ -158,17 +143,11 @@ int bind_irq_vector(int irq, int vector, cpumask_t domain);
void irq_set_affinity(struct irq_desc *, const cpumask_t *mask);
-int init_domain_irq_mapping(struct domain *);
-void cleanup_domain_irq_mapping(struct domain *);
-
-#define domain_pirq_to_irq(d, pirq) pirq_field(d, pirq, arch.irq)
-#define domain_irq_to_pirq(d, irq) \
- ((long)radix_tree_lookup(&(d)->arch.irq_pirq, irq))
+#define domain_pirq_to_irq(d, pirq) ((d)->arch.pirq_irq[pirq])
+#define domain_irq_to_pirq(d, irq) ((d)->arch.irq_pirq[irq])
#define PIRQ_ALLOCATED -1
-#define domain_pirq_to_emuirq(d, pirq) pirq_field(d, pirq, arch.hvm.emuirq)
-#define domain_emuirq_to_pirq(d, emuirq) \
- (((long)radix_tree_lookup(&(d)->arch.hvm_domain.emuirq_pirq, emuirq) ?: \
- IRQ_UNBOUND + 1) - 1)
+#define domain_pirq_to_emuirq(d, pirq) ((d)->arch.pirq_emuirq[pirq])
+#define domain_emuirq_to_pirq(d, emuirq) ((d)->arch.emuirq_pirq[emuirq])
#define IRQ_UNBOUND -1
#define IRQ_PT -2
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index 72ec6c1c06..ddebbe14a9 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -38,12 +38,6 @@ struct vcpu_guest_context *alloc_vcpu_guest_context(void);
void free_vcpu_guest_context(struct vcpu_guest_context *);
#endif
-/* Allocate/free a PIRQ structure. */
-#ifndef alloc_pirq_struct
-struct pirq *alloc_pirq_struct(struct domain *);
-#endif
-#define free_pirq_struct xfree
-
/*
* Initialise/destroy arch-specific details of a VCPU.
* - vcpu_initialise() is called after the basic generic fields of the
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index 257cb3a5f0..f39207c8d0 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -36,7 +36,7 @@ void send_guest_global_virq(struct domain *d, int virq);
* @pirq: Physical IRQ number
* Returns TRUE if the delivery port was already pending.
*/
-int send_guest_pirq(struct domain *, const struct pirq *);
+int send_guest_pirq(struct domain *d, int pirq);
/* Send a notification from a given domain's event-channel port. */
int evtchn_send(struct domain *d, unsigned int lport);
diff --git a/xen/include/xen/hvm/irq.h b/xen/include/xen/hvm/irq.h
index 5b2f77c63a..f21b02ceeb 100644
--- a/xen/include/xen/hvm/irq.h
+++ b/xen/include/xen/hvm/irq.h
@@ -25,7 +25,7 @@
#include <xen/types.h>
#include <xen/spinlock.h>
#include <xen/tasklet.h>
-#include <xen/timer.h>
+#include <asm/irq.h>
#include <public/hvm/save.h>
struct dev_intx_gsi_link {
@@ -38,15 +38,11 @@ struct dev_intx_gsi_link {
#define _HVM_IRQ_DPCI_MACH_PCI_SHIFT 0
#define _HVM_IRQ_DPCI_MACH_MSI_SHIFT 1
-#define _HVM_IRQ_DPCI_MAPPED_SHIFT 2
-#define _HVM_IRQ_DPCI_EOI_LATCH_SHIFT 3
#define _HVM_IRQ_DPCI_GUEST_PCI_SHIFT 4
#define _HVM_IRQ_DPCI_GUEST_MSI_SHIFT 5
#define _HVM_IRQ_DPCI_TRANSLATE_SHIFT 15
#define HVM_IRQ_DPCI_MACH_PCI (1 << _HVM_IRQ_DPCI_MACH_PCI_SHIFT)
#define HVM_IRQ_DPCI_MACH_MSI (1 << _HVM_IRQ_DPCI_MACH_MSI_SHIFT)
-#define HVM_IRQ_DPCI_MAPPED (1 << _HVM_IRQ_DPCI_MAPPED_SHIFT)
-#define HVM_IRQ_DPCI_EOI_LATCH (1 << _HVM_IRQ_DPCI_EOI_LATCH_SHIFT)
#define HVM_IRQ_DPCI_GUEST_PCI (1 << _HVM_IRQ_DPCI_GUEST_PCI_SHIFT)
#define HVM_IRQ_DPCI_GUEST_MSI (1 << _HVM_IRQ_DPCI_GUEST_MSI_SHIFT)
#define HVM_IRQ_DPCI_TRANSLATE (1 << _HVM_IRQ_DPCI_TRANSLATE_SHIFT)
@@ -67,6 +63,14 @@ struct hvm_gmsi_info {
int dest_vcpu_id; /* -1 :multi-dest, non-negative: dest_vcpu_id */
};
+struct hvm_mirq_dpci_mapping {
+ uint32_t flags; /* HVM_IRQ_DPCI_* flags */
+ int pending; /* assertions outstanding, awaiting guest EOI */
+ struct list_head digl_list; /* dev_intx_gsi_link entries for this mirq */
+ struct domain *dom; /* domain this mirq is bound to */
+ struct hvm_gmsi_info gmsi; /* guest MSI vector and delivery flags */
+};
+
struct hvm_girq_dpci_mapping {
struct list_head list;
uint8_t device;
@@ -84,33 +88,20 @@ struct hvm_girq_dpci_mapping {
/* Protected by domain's event_lock */
struct hvm_irq_dpci {
+ /* Machine IRQ to guest device/intx mapping. */
+ unsigned long *mapping;
+ struct hvm_mirq_dpci_mapping *mirq;
+ unsigned long *dirq_mask; /* bitmap of mirqs with an injection pending */
/* Guest IRQ to guest device/intx mapping. */
struct list_head girq[NR_HVM_IRQS];
/* Record of mapped ISA IRQs */
DECLARE_BITMAP(isairq_map, NR_ISAIRQS);
/* Record of mapped Links */
uint8_t link_cnt[NR_LINK];
+ struct timer *hvm_timer; /* per-pirq EOI timeout timers */
struct tasklet dirq_tasklet;
};
-/* Machine IRQ to guest device/intx mapping. */
-struct hvm_pirq_dpci {
- uint32_t flags;
- bool_t masked;
- uint16_t pending;
- struct list_head digl_list;
- struct domain *dom;
- struct hvm_gmsi_info gmsi;
- struct timer timer;
-};
-
-void pt_pirq_init(struct domain *, struct hvm_pirq_dpci *);
-bool_t pt_pirq_cleanup_check(struct hvm_pirq_dpci *);
-int pt_pirq_iterate(struct domain *d,
- int (*cb)(struct domain *, unsigned int pirq,
- struct hvm_pirq_dpci *, void *arg),
- void *arg);
-
/* Modify state of a PCI INTx wire. */
void hvm_pci_intx_assert(
struct domain *d, unsigned int device, unsigned int intx);
@@ -129,6 +120,4 @@ void hvm_maybe_deassert_evtchn_irq(void);
void hvm_assert_evtchn_irq(struct vcpu *v);
void hvm_set_callback_via(struct domain *d, uint64_t via);
-int vmsi_deliver(struct domain *, const struct hvm_pirq_dpci *);
-
#endif /* __XEN_HVM_IRQ_H__ */
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 4d4f4c4bf3..2dee3f2982 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -88,9 +88,7 @@ int iommu_unmap_page(struct domain *d, unsigned long gfn);
void iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte, int order, int present);
void iommu_set_pgd(struct domain *d);
void iommu_domain_teardown(struct domain *d);
-
-struct pirq;
-int hvm_do_IRQ_dpci(struct domain *, struct pirq *);
+int hvm_do_IRQ_dpci(struct domain *d, unsigned int irq);
int dpci_ioport_intercept(ioreq_t *p);
int pt_irq_create_bind_vtd(struct domain *d,
xen_domctl_bind_pt_irq_t *pt_irq_bind);
diff --git a/xen/include/xen/irq.h b/xen/include/xen/irq.h
index be729346b7..02456e4a80 100644
--- a/xen/include/xen/irq.h
+++ b/xen/include/xen/irq.h
@@ -135,41 +135,13 @@ extern void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs);
struct domain;
struct vcpu;
-
-struct pirq {
- u16 evtchn;
- bool_t masked;
- struct arch_pirq arch;
-};
-
-#define pirq_info(d, p) ((struct pirq *)radix_tree_lookup(&(d)->pirq_tree, p))
-
-/* Use this instead of pirq_info() if the structure may need allocating. */
-extern struct pirq *pirq_get_info(struct domain *, int pirq);
-
-#define pirq_field(d, p, f) ({ \
- const struct pirq *__pi = pirq_info(d, p); \
- __pi ? __pi->f : 0; \
-})
-#define pirq_to_evtchn(d, pirq) pirq_field(d, pirq, evtchn)
-#define pirq_masked(d, pirq) pirq_field(d, pirq, masked)
-
-void pirq_cleanup_check(struct pirq *, struct domain *, int);
-
-#define pirq_cleanup_check(info, d, pirq) \
- ((info)->evtchn ? pirq_cleanup_check(info, d, pirq) : (void)0)
-
-extern void pirq_guest_eoi(struct domain *, struct pirq *);
-extern void desc_guest_eoi(struct domain *, struct irq_desc *, struct pirq *);
+extern int pirq_guest_eoi(struct domain *d, int irq);
extern int pirq_guest_unmask(struct domain *d);
-extern int pirq_guest_bind(struct vcpu *, int pirq, struct pirq *,
- int will_share);
-extern void pirq_guest_unbind(struct domain *d, int pirq, struct pirq *);
+extern int pirq_guest_bind(struct vcpu *v, int irq, int will_share);
+extern void pirq_guest_unbind(struct domain *d, int irq);
extern void pirq_set_affinity(struct domain *d, int irq, const cpumask_t *);
extern irq_desc_t *domain_spin_lock_irq_desc(
struct domain *d, int irq, unsigned long *pflags);
-extern irq_desc_t *pirq_spin_lock_irq_desc(
- struct domain *, const struct pirq *, unsigned long *pflags);
static inline void set_native_irq_info(unsigned int irq, const cpumask_t *mask)
{
diff --git a/xen/include/xen/pci.h b/xen/include/xen/pci.h
index dbd9d8c262..40c9847335 100644
--- a/xen/include/xen/pci.h
+++ b/xen/include/xen/pci.h
@@ -117,9 +117,8 @@ int pci_find_cap_offset(u8 bus, u8 dev, u8 func, u8 cap);
int pci_find_next_cap(u8 bus, unsigned int devfn, u8 pos, int cap);
int pci_find_ext_capability(int seg, int bus, int devfn, int cap);
-struct pirq;
-int msixtbl_pt_register(struct domain *, struct pirq *, uint64_t gtable);
-void msixtbl_pt_unregister(struct domain *, struct pirq *);
+int msixtbl_pt_register(struct domain *d, int pirq, uint64_t gtable);
+void msixtbl_pt_unregister(struct domain *d, int pirq);
void pci_enable_acs(struct pci_dev *pdev);
#endif /* __XEN_PCI_H__ */
diff --git a/xen/include/xen/radix-tree.h b/xen/include/xen/radix-tree.h
index 289fe05dfc..d4bb4e8992 100644
--- a/xen/include/xen/radix-tree.h
+++ b/xen/include/xen/radix-tree.h
@@ -72,7 +72,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index,
void(*node_free)(struct radix_tree_node *));
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
- unsigned long first_index, unsigned int max_items,
- unsigned long *indexes);
+ unsigned long first_index, unsigned int max_items);
+void radix_tree_init(void);
#endif /* _XEN_RADIX_TREE_H */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 2adb3262af..17fac57dfd 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -21,7 +21,6 @@
#include <xen/irq.h>
#include <xen/mm.h>
#include <xen/tasklet.h>
-#include <xen/radix-tree.h>
#include <public/mem_event.h>
#include <xen/cpumask.h>
#include <xen/nodemask.h>
@@ -235,11 +234,13 @@ struct domain
struct grant_table *grant_table;
/*
- * Interrupt to event-channel mappings and other per-guest-pirq data.
- * Protected by the domain's event-channel spinlock.
+ * Interrupt to event-channel mappings. Updates should be protected by the
+ * domain's event-channel spinlock. Read accesses can also synchronise on
+ * the lock, but races don't usually matter.
*/
unsigned int nr_pirqs;
- struct radix_tree_root pirq_tree;
+ u16 *pirq_to_evtchn; /* pirq -> bound event-channel port (0 if none) */
+ unsigned long *pirq_mask; /* pirqs masked pending a guest EOI */
/* I/O capabilities (access to IRQs and memory-mapped I/O). */
struct rangeset *iomem_caps;