aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKeir Fraser <keir.fraser@citrix.com>2009-01-08 11:25:06 +0000
committerKeir Fraser <keir.fraser@citrix.com>2009-01-08 11:25:06 +0000
commit11f8ac5ebfa45c92daa128d58a42b265ceeb0e4c (patch)
tree827d0e9e83307c6256318c3b47924c029d749d10
parent6df92ebc3022fa27c6bcf5bbcda1c2ccd24c1c17 (diff)
downloadxen-11f8ac5ebfa45c92daa128d58a42b265ceeb0e4c.tar.gz
xen-11f8ac5ebfa45c92daa128d58a42b265ceeb0e4c.tar.bz2
xen-11f8ac5ebfa45c92daa128d58a42b265ceeb0e4c.zip
passthrough: MSI-INTx translation for HVM
The patch adds a new type of pt_irq: PT_IRQ_TYPE_MSI_TRANSLATE. With it, a guest dpci irq can now use MSI as the underlying physical pirq while being injected into the guest as an INTx irq. Signed-off-by: Qing He <qing.he@intel.com>
-rw-r--r--tools/libxc/xc_domain.c3
-rw-r--r--xen/arch/x86/hvm/vmsi.c2
-rw-r--r--xen/drivers/passthrough/io.c105
-rw-r--r--xen/include/public/domctl.h1
-rw-r--r--xen/include/xen/hvm/irq.h11
5 files changed, 87 insertions, 35 deletions
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index b508149b3f..af43b48e57 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -958,7 +958,8 @@ int xc_domain_bind_pt_irq(
bind->hvm_domid = domid;
bind->irq_type = irq_type;
bind->machine_irq = machine_irq;
- if ( irq_type == PT_IRQ_TYPE_PCI )
+ if ( irq_type == PT_IRQ_TYPE_PCI ||
+ irq_type == PT_IRQ_TYPE_MSI_TRANSLATE )
{
bind->u.pci.bus = bus;
bind->u.pci.device = device;
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index cc9e9adde5..65682894e9 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -134,7 +134,7 @@ int vmsi_deliver(struct domain *d, int pirq)
"vector=%x trig_mode=%x\n",
dest, dest_mode, delivery_mode, vector, trig_mode);
- if ( !test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags) )
+ if ( !( hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_GUEST_MSI ) )
{
gdprintk(XENLOG_WARNING, "pirq %x not msi \n", pirq);
return 0;
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index a627b996fe..c2736b76e2 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -24,6 +24,11 @@
#include <asm/hvm/iommu.h>
#include <xen/hvm/irq.h>
+static int pt_irq_need_timer(uint32_t flags)
+{
+ return !(flags & (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_TRANSLATE));
+}
+
static void pt_irq_time_out(void *data)
{
struct hvm_mirq_dpci_mapping *irq_map = data;
@@ -93,7 +98,8 @@ int pt_irq_create_bind_vtd(
if ( !test_and_set_bit(pirq, hvm_irq_dpci->mapping))
{
- set_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags);
+ hvm_irq_dpci->mirq[pirq].flags = HVM_IRQ_DPCI_MACH_MSI |
+ HVM_IRQ_DPCI_GUEST_MSI;
hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = pirq;
@@ -104,7 +110,7 @@ int pt_irq_create_bind_vtd(
hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = 0;
hvm_irq_dpci->mirq[pirq].gmsi.gflags = 0;
hvm_irq_dpci->mirq[pirq].gmsi.gvec = 0;
- clear_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags);
+ hvm_irq_dpci->mirq[pirq].flags = 0;
clear_bit(pirq, hvm_irq_dpci->mapping);
spin_unlock(&d->event_lock);
return rc;
@@ -150,17 +156,33 @@ int pt_irq_create_bind_vtd(
if ( !test_and_set_bit(machine_gsi, hvm_irq_dpci->mapping))
{
unsigned int vector = domain_irq_to_vector(d, machine_gsi);
+ unsigned int share;
hvm_irq_dpci->mirq[machine_gsi].dom = d;
+ if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI_TRANSLATE )
+ {
+ hvm_irq_dpci->mirq[machine_gsi].flags = HVM_IRQ_DPCI_MACH_MSI |
+ HVM_IRQ_DPCI_GUEST_PCI |
+ HVM_IRQ_DPCI_TRANSLATE;
+ share = 0;
+ }
+ else /* PT_IRQ_TYPE_PCI */
+ {
+ hvm_irq_dpci->mirq[machine_gsi].flags = HVM_IRQ_DPCI_MACH_PCI |
+ HVM_IRQ_DPCI_GUEST_PCI;
+ share = BIND_PIRQ__WILL_SHARE;
+ }
/* Init timer before binding */
- init_timer(&hvm_irq_dpci->hvm_timer[vector],
- pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
+ if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
+ init_timer(&hvm_irq_dpci->hvm_timer[vector],
+ pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
/* Deal with gsi for legacy devices */
- rc = pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
+ rc = pirq_guest_bind(d->vcpu[0], machine_gsi, share);
if ( unlikely(rc) )
{
- kill_timer(&hvm_irq_dpci->hvm_timer[vector]);
+ if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
+ kill_timer(&hvm_irq_dpci->hvm_timer[vector]);
hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
clear_bit(machine_gsi, hvm_irq_dpci->mapping);
hvm_irq_dpci->girq[guest_gsi].machine_gsi = 0;
@@ -237,7 +259,8 @@ int pt_irq_destroy_bind_vtd(
if ( list_empty(&hvm_irq_dpci->mirq[machine_gsi].digl_list) )
{
pirq_guest_unbind(d, machine_gsi);
- kill_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)]);
+ if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
+ kill_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)]);
hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
hvm_irq_dpci->mirq[machine_gsi].flags = 0;
clear_bit(machine_gsi, hvm_irq_dpci->mapping);
@@ -267,7 +290,7 @@ int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
* PIC) and we need to detect that.
*/
set_bit(mirq, dpci->dirq_mask);
- if ( !test_bit(_HVM_IRQ_DPCI_MSI, &dpci->mirq[mirq].flags) )
+ if ( pt_irq_need_timer(dpci->mirq[mirq].flags) )
set_timer(&dpci->hvm_timer[domain_irq_to_vector(d, mirq)],
NOW() + PT_IRQ_TIME_OUT);
vcpu_kick(d->vcpu[0]);
@@ -276,35 +299,40 @@ int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
}
#ifdef SUPPORT_MSI_REMAPPING
-void hvm_dpci_msi_eoi(struct domain *d, int vector)
+/* called with d->event_lock held */
+static void __msi_pirq_eoi(struct domain *d, int pirq)
{
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
irq_desc_t *desc;
- int pirq;
-
- if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
- return;
-
- spin_lock(&d->event_lock);
- pirq = hvm_irq_dpci->msi_gvec_pirq[vector];
- if ( ( pirq >= 0 ) && (pirq < NR_IRQS) &&
- test_bit(pirq, hvm_irq_dpci->mapping) &&
- (test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags)))
- {
+ if ( ( pirq >= 0 ) && ( pirq < NR_IRQS ) &&
+ test_bit(pirq, hvm_irq_dpci->mapping) &&
+ ( hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MACH_MSI) )
+ {
BUG_ON(!local_irq_is_enabled());
desc = domain_spin_lock_irq_desc(d, pirq, NULL);
- if (!desc)
- {
- spin_unlock(&d->event_lock);
+ if ( !desc )
return;
- }
desc->status &= ~IRQ_INPROGRESS;
spin_unlock_irq(&desc->lock);
pirq_guest_eoi(d, pirq);
- }
+ }
+}
+
+void hvm_dpci_msi_eoi(struct domain *d, int vector)
+{
+ struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+ int pirq;
+
+ if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
+ return;
+
+ spin_lock(&d->event_lock);
+
+ pirq = hvm_irq_dpci->msi_gvec_pirq[vector];
+ __msi_pirq_eoi(d, pirq);
spin_unlock(&d->event_lock);
}
@@ -336,14 +364,15 @@ void hvm_dirq_assist(struct vcpu *v)
spin_lock(&d->event_lock);
#ifdef SUPPORT_MSI_REMAPPING
- if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
+ if ( hvm_irq_dpci->mirq[irq].flags & HVM_IRQ_DPCI_GUEST_MSI )
{
hvm_pci_msi_assert(d, irq);
spin_unlock(&d->event_lock);
continue;
}
#endif
- stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)]);
+ if ( pt_irq_need_timer(hvm_irq_dpci->mirq[irq].flags) )
+ stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)]);
list_for_each_entry ( digl, &hvm_irq_dpci->mirq[irq].digl_list, list )
{
@@ -351,6 +380,14 @@ void hvm_dirq_assist(struct vcpu *v)
intx = digl->intx;
hvm_pci_intx_assert(d, device, intx);
hvm_irq_dpci->mirq[irq].pending++;
+
+#ifdef SUPPORT_MSI_REMAPPING
+ if ( hvm_irq_dpci->mirq[irq].flags & HVM_IRQ_DPCI_TRANSLATE )
+ {
+ /* for translated MSI to INTx interrupt, eoi as early as possible */
+ __msi_pirq_eoi(d, irq);
+ }
+#endif
}
/*
@@ -360,8 +397,9 @@ void hvm_dirq_assist(struct vcpu *v)
* guest will never deal with the irq, then the physical interrupt line
* will never be deasserted.
*/
- set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
- NOW() + PT_IRQ_TIME_OUT);
+ if ( pt_irq_need_timer(hvm_irq_dpci->mirq[irq].flags) )
+ set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
+ NOW() + PT_IRQ_TIME_OUT);
spin_unlock(&d->event_lock);
}
}
@@ -405,9 +443,12 @@ void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
* No need to get vector lock for timer
* since interrupt is still not EOIed
*/
- stop_timer(&hvm_irq_dpci->hvm_timer[
- domain_irq_to_vector(d, machine_gsi)]);
- pirq_guest_eoi(d, machine_gsi);
+ if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
+ {
+ stop_timer(&hvm_irq_dpci->hvm_timer[
+ domain_irq_to_vector(d, machine_gsi)]);
+ pirq_guest_eoi(d, machine_gsi);
+ }
}
}
spin_unlock(&d->event_lock);
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 4863f48eec..70cede1176 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -466,6 +466,7 @@ typedef enum pt_irq_type_e {
PT_IRQ_TYPE_PCI,
PT_IRQ_TYPE_ISA,
PT_IRQ_TYPE_MSI,
+ PT_IRQ_TYPE_MSI_TRANSLATE,
} pt_irq_type_t;
struct xen_domctl_bind_pt_irq {
uint32_t machine_irq;
diff --git a/xen/include/xen/hvm/irq.h b/xen/include/xen/hvm/irq.h
index a89e2e9f88..854be8bf7e 100644
--- a/xen/include/xen/hvm/irq.h
+++ b/xen/include/xen/hvm/irq.h
@@ -35,7 +35,16 @@ struct dev_intx_gsi_link {
uint8_t link;
};
-#define _HVM_IRQ_DPCI_MSI 0x1
+#define _HVM_IRQ_DPCI_MACH_PCI_SHIFT 0
+#define _HVM_IRQ_DPCI_MACH_MSI_SHIFT 1
+#define _HVM_IRQ_DPCI_GUEST_PCI_SHIFT 4
+#define _HVM_IRQ_DPCI_GUEST_MSI_SHIFT 5
+#define _HVM_IRQ_DPCI_TRANSLATE_SHIFT 15
+#define HVM_IRQ_DPCI_MACH_PCI (1 << _HVM_IRQ_DPCI_MACH_PCI_SHIFT)
+#define HVM_IRQ_DPCI_MACH_MSI (1 << _HVM_IRQ_DPCI_MACH_MSI_SHIFT)
+#define HVM_IRQ_DPCI_GUEST_PCI (1 << _HVM_IRQ_DPCI_GUEST_PCI_SHIFT)
+#define HVM_IRQ_DPCI_GUEST_MSI (1 << _HVM_IRQ_DPCI_GUEST_MSI_SHIFT)
+#define HVM_IRQ_DPCI_TRANSLATE (1 << _HVM_IRQ_DPCI_TRANSLATE_SHIFT)
struct hvm_gmsi_info {
uint32_t gvec;