author    Keir Fraser <keir.fraser@citrix.com>  2008-10-09 11:14:52 +0100
committer Keir Fraser <keir.fraser@citrix.com>  2008-10-09 11:14:52 +0100
commit    cdb9a3d55985ae317f78f147dd3af5c02c564caf
tree      139075c762de9af2e7346145118c0117c1eac1fb
parent    7ae8a1e482744619d6f4b93d67a044523099eae7
Fix lock issue for hvm pass-through domain

This patch protects the hvm_irq_dpci structure with the per-domain
evtchn_lock, so that access to the domain's pirq_vector mapping is
also protected.

Signed-off-by: Jiang, Yunhong <yunhong.jiang@intel.com>
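
The locking discipline the patch establishes is: any code that reads or
writes the domain's hvm_irq_dpci state must do so under d->evtchn_lock,
which replaces the finer-grained dirq_lock that previously guarded only
the pending counters. Below is a minimal, self-contained C sketch of that
pattern; the pthread mutex, the simplified structs, and the printf are
stand-ins for Xen's spinlock, the real types, and pirq_guest_eoi(), so
this models the idiom rather than the actual hypervisor code.

/* Minimal model of the evtchn_lock pattern this patch establishes.
 * Build: gcc -pthread -std=c99 evtchn_lock_sketch.c
 * All names are simplified stand-ins, not the real Xen API. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_IRQS 16

struct hvm_irq_dpci {
    int pending[NR_IRQS];            /* per-machine-irq pending count */
};

struct domain {
    pthread_mutex_t evtchn_lock;     /* models d->evtchn_lock */
    struct hvm_irq_dpci *dpci;       /* may be torn down concurrently */
};

/* Models svm/vmx_dirq_assist(): the dpci pointer is only dereferenced
 * under the lock, since teardown (pci_clean_dpci_irqs) can free it. */
static void dirq_assist(struct domain *d, int irq)
{
    pthread_mutex_lock(&d->evtchn_lock);
    if ( d->dpci != NULL )
        d->dpci->pending[irq]++;     /* previously guarded by dirq_lock */
    pthread_mutex_unlock(&d->evtchn_lock);
}

/* Models pt_irq_time_out(): state is updated under the lock, which is
 * dropped before the EOI, mirroring the ordering in the patch. */
static void irq_time_out(struct domain *d, int irq)
{
    pthread_mutex_lock(&d->evtchn_lock);
    if ( d->dpci != NULL )
        d->dpci->pending[irq] = 0;
    pthread_mutex_unlock(&d->evtchn_lock);

    printf("pirq_guest_eoi(irq=%d)\n", irq); /* stand-in for the real call */
}

int main(void)
{
    struct domain d;
    pthread_mutex_init(&d.evtchn_lock, NULL);
    d.dpci = calloc(1, sizeof(*d.dpci));
    dirq_assist(&d, 3);
    irq_time_out(&d, 3);
    free(d.dpci);
    return 0;
}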
-rw-r--r--  xen/arch/x86/hvm/svm/intr.c           |   5
-rw-r--r--  xen/arch/x86/hvm/vmsi.c               |   2
-rw-r--r--  xen/arch/x86/hvm/vmx/intr.c           |  15
-rw-r--r--  xen/arch/x86/irq.c                    |   2
-rw-r--r--  xen/drivers/passthrough/io.c          | 154
-rw-r--r--  xen/drivers/passthrough/pci.c         |  12
-rw-r--r--  xen/drivers/passthrough/vtd/x86/vtd.c |  26
-rw-r--r--  xen/include/asm-x86/hvm/irq.h         |   6
-rw-r--r--  xen/include/xen/irq.h                 |   2
9 files changed, 137 insertions(+), 87 deletions(-)
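
The other recurring idiom in the patch is the replacement of the
per-entry HVM_IRQ_DPCI_VALID flag with a DECLARE_BITMAP(mapping,
NR_PIRQS): a pirq is claimed atomically with test_and_set_bit() at bind
time, and teardown paths walk only the bound pirqs via
find_first_bit()/find_next_bit() instead of testing a flag on every one
of NR_IRQS entries. A small self-contained sketch of that idiom follows;
the bitmap helpers here are toy, non-atomic stand-ins for Xen's
asm/bitops.h primitives, kept only to make the example runnable.

/* Toy model of the pirq "mapping" bitmap idiom used by the patch.
 * Build: gcc -std=c99 pirq_bitmap_sketch.c
 * The helpers are simplified, non-atomic stand-ins for Xen's bitops. */
#include <stdio.h>

#define NR_PIRQS 256
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
#define BITMAP_LONGS ((NR_PIRQS + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long mapping[BITMAP_LONGS];

static int test_and_set_bit(int nr, unsigned long *map)
{
    unsigned long mask = 1UL << (nr % BITS_PER_LONG);
    int old = (map[nr / BITS_PER_LONG] & mask) != 0;
    map[nr / BITS_PER_LONG] |= mask;     /* non-atomic in this model */
    return old;
}

static int test_bit(int nr, const unsigned long *map)
{
    return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

static int find_next_set_bit(const unsigned long *map, int size, int start)
{
    for ( int i = start; i < size; i++ )
        if ( test_bit(i, map) )
            return i;
    return size;                         /* "not found", as in Xen */
}

int main(void)
{
    /* Bind: claim a pirq exactly once (mirrors pt_irq_create_bind_vtd). */
    if ( !test_and_set_bit(5, mapping) )
        printf("pirq 5 bound\n");
    test_and_set_bit(42, mapping);

    /* Teardown: visit only bound pirqs (mirrors pci_clean_dpci_irqs). */
    for ( int i = find_next_set_bit(mapping, NR_PIRQS, 0);
          i < NR_PIRQS;
          i = find_next_set_bit(mapping, NR_PIRQS, i + 1) )
        printf("unbinding pirq %d\n", i);

    return 0;
}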
diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
index 7fec5dc35e..9027790247 100644
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -124,9 +124,11 @@ static void svm_dirq_assist(struct vcpu *v)
if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
continue;
+ spin_lock(&d->evtchn_lock);
if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
{
hvm_pci_msi_assert(d, irq);
+ spin_unlock(&d->evtchn_lock);
continue;
}
@@ -137,9 +139,7 @@ static void svm_dirq_assist(struct vcpu *v)
device = digl->device;
intx = digl->intx;
hvm_pci_intx_assert(d, device, intx);
- spin_lock(&hvm_irq_dpci->dirq_lock);
hvm_irq_dpci->mirq[irq].pending++;
- spin_unlock(&hvm_irq_dpci->dirq_lock);
}
/*
@@ -151,6 +151,7 @@ static void svm_dirq_assist(struct vcpu *v)
*/
set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
NOW() + PT_IRQ_TIME_OUT);
+ spin_unlock(&d->evtchn_lock);
}
}
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index d6ad008f4a..6eefb61bfa 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -134,7 +134,7 @@ int vmsi_deliver(struct domain *d, int pirq)
"vector=%x trig_mode=%x\n",
dest, dest_mode, delivery_mode, vector, trig_mode);
- if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MSI) )
+ if ( !test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags) )
{
gdprintk(XENLOG_WARNING, "pirq %x not msi \n", pirq);
return 0;
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index 7379066734..808446f4c9 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -127,11 +127,13 @@ static void vmx_dirq_assist(struct vcpu *v)
if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
continue;
- if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
- {
- hvm_pci_msi_assert(d, irq);
- continue;
- }
+ spin_lock(&d->evtchn_lock);
+ if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
+ {
+ hvm_pci_msi_assert(d, irq);
+ spin_unlock(&d->evtchn_lock);
+ continue;
+ }
stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)]);
@@ -140,9 +142,7 @@ static void vmx_dirq_assist(struct vcpu *v)
device = digl->device;
intx = digl->intx;
hvm_pci_intx_assert(d, device, intx);
- spin_lock(&hvm_irq_dpci->dirq_lock);
hvm_irq_dpci->mirq[irq].pending++;
- spin_unlock(&hvm_irq_dpci->dirq_lock);
}
/*
@@ -154,6 +154,7 @@ static void vmx_dirq_assist(struct vcpu *v)
*/
set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
NOW() + PT_IRQ_TIME_OUT);
+ spin_unlock(&d->evtchn_lock);
}
}
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 718b3dde39..eb1d2ef776 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -285,7 +285,7 @@ static void __do_IRQ_guest(int vector)
* The descriptor is returned locked. This function is safe against changes
* to the per-domain irq-to-vector mapping.
*/
-static irq_desc_t *domain_spin_lock_irq_desc(
+irq_desc_t *domain_spin_lock_irq_desc(
struct domain *d, int irq, unsigned long *pflags)
{
unsigned int vector;
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index c8d81bcefa..074fc35183 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -26,10 +26,14 @@ static void pt_irq_time_out(void *data)
struct hvm_mirq_dpci_mapping *irq_map = data;
unsigned int guest_gsi, machine_gsi = 0;
int vector;
- struct hvm_irq_dpci *dpci = domain_get_irq_dpci(irq_map->dom);
+ struct hvm_irq_dpci *dpci = NULL;
struct dev_intx_gsi_link *digl;
uint32_t device, intx;
+ spin_lock(&irq_map->dom->evtchn_lock);
+
+ dpci = domain_get_irq_dpci(irq_map->dom);
+ ASSERT(dpci);
list_for_each_entry ( digl, &irq_map->digl_list, list )
{
guest_gsi = digl->gsi;
@@ -41,55 +45,65 @@ static void pt_irq_time_out(void *data)
clear_bit(machine_gsi, dpci->dirq_mask);
vector = domain_irq_to_vector(irq_map->dom, machine_gsi);
- stop_timer(&dpci->hvm_timer[vector]);
- spin_lock(&dpci->dirq_lock);
dpci->mirq[machine_gsi].pending = 0;
- spin_unlock(&dpci->dirq_lock);
+ spin_unlock(&irq_map->dom->evtchn_lock);
pirq_guest_eoi(irq_map->dom, machine_gsi);
}
int pt_irq_create_bind_vtd(
struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
- struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
+ struct hvm_irq_dpci *hvm_irq_dpci = NULL;
uint32_t machine_gsi, guest_gsi;
uint32_t device, intx, link;
struct dev_intx_gsi_link *digl;
+ int pirq = pt_irq_bind->machine_irq;
+
+ if ( pirq < 0 || pirq >= NR_PIRQS )
+ return -EINVAL;
+
+ spin_lock(&d->evtchn_lock);
+ hvm_irq_dpci = domain_get_irq_dpci(d);
if ( hvm_irq_dpci == NULL )
{
hvm_irq_dpci = xmalloc(struct hvm_irq_dpci);
if ( hvm_irq_dpci == NULL )
+ {
+ spin_unlock(&d->evtchn_lock);
return -ENOMEM;
-
+ }
memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
- spin_lock_init(&hvm_irq_dpci->dirq_lock);
for ( int i = 0; i < NR_IRQS; i++ )
INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
+ }
- if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
- xfree(hvm_irq_dpci);
+ if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
+ {
+ xfree(hvm_irq_dpci);
+ spin_unlock(&d->evtchn_lock);
+ return -EINVAL;
}
if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI )
{
- int pirq = pt_irq_bind->machine_irq;
-
- if ( pirq < 0 || pirq >= NR_IRQS )
- return -EINVAL;
- if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_VALID ) )
+ if ( !test_and_set_bit(pirq, hvm_irq_dpci->mapping))
{
- hvm_irq_dpci->mirq[pirq].flags |= HVM_IRQ_DPCI_VALID |
- HVM_IRQ_DPCI_MSI ;
+ set_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags);
+ hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
+ hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
+ hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = pirq;
+ /* bind after hvm_irq_dpci is setup to avoid race with irq handler*/
pirq_guest_bind(d->vcpu[0], pirq, 0);
}
+ else if (hvm_irq_dpci->mirq[pirq].gmsi.gvec != pt_irq_bind->u.msi.gvec
+ ||hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] != pirq)
- hvm_irq_dpci->mirq[pirq].flags |= HVM_IRQ_DPCI_VALID |HVM_IRQ_DPCI_MSI ;
- hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
- hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
- hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = pirq;
-
+ {
+ spin_unlock(&d->evtchn_lock);
+ return -EBUSY;
+ }
}
else
{
@@ -102,7 +116,10 @@ int pt_irq_create_bind_vtd(
digl = xmalloc(struct dev_intx_gsi_link);
if ( !digl )
+ {
+ spin_unlock(&d->evtchn_lock);
return -ENOMEM;
+ }
digl->device = device;
digl->intx = intx;
@@ -117,11 +134,11 @@ int pt_irq_create_bind_vtd(
hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
/* Bind the same mirq once in the same domain */
- if ( !(hvm_irq_dpci->mirq[machine_gsi].flags & HVM_IRQ_DPCI_VALID) )
+ if ( !test_and_set_bit(machine_gsi, hvm_irq_dpci->mapping))
{
- hvm_irq_dpci->mirq[machine_gsi].flags |= HVM_IRQ_DPCI_VALID;
hvm_irq_dpci->mirq[machine_gsi].dom = d;
+ /* Init timer before binding */
init_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)],
pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
/* Deal with gsi for legacy devices */
@@ -132,37 +149,45 @@ int pt_irq_create_bind_vtd(
"VT-d irq bind: m_irq = %x device = %x intx = %x\n",
machine_gsi, device, intx);
}
+ spin_unlock(&d->evtchn_lock);
return 0;
}
int pt_irq_destroy_bind_vtd(
struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
- struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
+ struct hvm_irq_dpci *hvm_irq_dpci = NULL;
uint32_t machine_gsi, guest_gsi;
uint32_t device, intx, link;
struct list_head *digl_list, *tmp;
struct dev_intx_gsi_link *digl;
- if ( hvm_irq_dpci == NULL )
- return 0;
-
machine_gsi = pt_irq_bind->machine_irq;
device = pt_irq_bind->u.pci.device;
intx = pt_irq_bind->u.pci.intx;
guest_gsi = hvm_pci_intx_gsi(device, intx);
link = hvm_pci_intx_link(device, intx);
- hvm_irq_dpci->link_cnt[link]--;
gdprintk(XENLOG_INFO,
"pt_irq_destroy_bind_vtd: machine_gsi=%d "
"guest_gsi=%d, device=%d, intx=%d.\n",
machine_gsi, guest_gsi, device, intx);
+ spin_lock(&d->evtchn_lock);
+
+ hvm_irq_dpci = domain_get_irq_dpci(d);
+
+ if ( hvm_irq_dpci == NULL )
+ {
+ spin_unlock(&d->evtchn_lock);
+ return -EINVAL;
+ }
+
+ hvm_irq_dpci->link_cnt[link]--;
memset(&hvm_irq_dpci->girq[guest_gsi], 0,
sizeof(struct hvm_girq_dpci_mapping));
/* clear the mirq info */
- if ( (hvm_irq_dpci->mirq[machine_gsi].flags & HVM_IRQ_DPCI_VALID) )
+ if ( test_bit(machine_gsi, hvm_irq_dpci->mapping))
{
list_for_each_safe ( digl_list, tmp,
&hvm_irq_dpci->mirq[machine_gsi].digl_list )
@@ -185,9 +210,10 @@ int pt_irq_destroy_bind_vtd(
kill_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)]);
hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
hvm_irq_dpci->mirq[machine_gsi].flags = 0;
+ clear_bit(machine_gsi, hvm_irq_dpci->mapping);
}
}
-
+ spin_unlock(&d->evtchn_lock);
gdprintk(XENLOG_INFO,
"XEN_DOMCTL_irq_unmapping: m_irq = %x device = %x intx = %x\n",
machine_gsi, device, intx);
@@ -199,8 +225,9 @@ int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
{
struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
+ ASSERT(spin_is_locked(&irq_desc[domain_irq_to_vector(d, mirq)].lock));
if ( !iommu_enabled || (d == dom0) || !dpci ||
- !dpci->mirq[mirq].flags & HVM_IRQ_DPCI_VALID )
+ !test_bit(mirq, dpci->mapping))
return 0;
/*
@@ -218,44 +245,46 @@ int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
return 1;
}
-
void hvm_dpci_msi_eoi(struct domain *d, int vector)
{
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
- int pirq;
- unsigned long flags;
irq_desc_t *desc;
+ int pirq;
if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
return;
+ spin_lock(&d->evtchn_lock);
pirq = hvm_irq_dpci->msi_gvec_pirq[vector];
if ( ( pirq >= 0 ) && (pirq < NR_PIRQS) &&
- (hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_VALID) &&
- (hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MSI) )
- {
- int vec;
- vec = domain_irq_to_vector(d, pirq);
- desc = &irq_desc[vec];
-
- spin_lock_irqsave(&desc->lock, flags);
- desc->status &= ~IRQ_INPROGRESS;
- spin_unlock_irqrestore(&desc->lock, flags);
-
- pirq_guest_eoi(d, pirq);
- }
+ test_bit(pirq, hvm_irq_dpci->mapping) &&
+ (test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags)))
+ {
+ BUG_ON(!local_irq_is_enabled());
+ desc = domain_spin_lock_irq_desc(d, pirq, NULL);
+ if (!desc)
+ {
+ spin_unlock(&d->evtchn_lock);
+ return;
+ }
+
+ desc->status &= ~IRQ_INPROGRESS;
+ spin_unlock_irq(&desc->lock);
+
+ pirq_guest_eoi(d, pirq);
+ }
+
+ spin_unlock(&d->evtchn_lock);
}
void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
union vioapic_redir_entry *ent)
{
- struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
+ struct hvm_irq_dpci *hvm_irq_dpci = NULL;
uint32_t device, intx, machine_gsi;
- if ( !iommu_enabled || (hvm_irq_dpci == NULL) ||
- (guest_gsi >= NR_ISAIRQS &&
- !hvm_irq_dpci->girq[guest_gsi].valid) )
+ if ( !iommu_enabled)
return;
if ( guest_gsi < NR_ISAIRQS )
@@ -264,23 +293,34 @@ void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
return;
}
- machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
+ spin_lock(&d->evtchn_lock);
+ hvm_irq_dpci = domain_get_irq_dpci(d);
+
+ if((hvm_irq_dpci == NULL) ||
+ (guest_gsi >= NR_ISAIRQS &&
+ !hvm_irq_dpci->girq[guest_gsi].valid) )
+ {
+ spin_unlock(&d->evtchn_lock);
+ return;
+ }
+
device = hvm_irq_dpci->girq[guest_gsi].device;
intx = hvm_irq_dpci->girq[guest_gsi].intx;
hvm_pci_intx_deassert(d, device, intx);
- spin_lock(&hvm_irq_dpci->dirq_lock);
+ machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
if ( --hvm_irq_dpci->mirq[machine_gsi].pending == 0 )
{
- spin_unlock(&hvm_irq_dpci->dirq_lock);
-
if ( (ent == NULL) || !ent->fields.mask )
{
+ /*
+ * No need to get vector lock for timer
+ * since interrupt is still not EOIed
+ */
stop_timer(&hvm_irq_dpci->hvm_timer[
domain_irq_to_vector(d, machine_gsi)]);
pirq_guest_eoi(d, machine_gsi);
}
}
- else
- spin_unlock(&hvm_irq_dpci->dirq_lock);
+ spin_unlock(&d->evtchn_lock);
}
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index df7161d539..fab74611da 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -154,7 +154,7 @@ int pci_remove_device(u8 bus, u8 devfn)
static void pci_clean_dpci_irqs(struct domain *d)
{
- struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
+ struct hvm_irq_dpci *hvm_irq_dpci = NULL;
uint32_t i;
struct list_head *digl_list, *tmp;
struct dev_intx_gsi_link *digl;
@@ -165,13 +165,14 @@ static void pci_clean_dpci_irqs(struct domain *d)
if ( !is_hvm_domain(d) && !need_iommu(d) )
return;
+ spin_lock(&d->evtchn_lock);
+ hvm_irq_dpci = domain_get_irq_dpci(d);
if ( hvm_irq_dpci != NULL )
{
- for ( i = 0; i < NR_IRQS; i++ )
+ for ( i = find_first_bit(hvm_irq_dpci->mapping, NR_PIRQS);
+ i < NR_PIRQS;
+ i = find_next_bit(hvm_irq_dpci->mapping, NR_PIRQS, i + 1) )
{
- if ( !(hvm_irq_dpci->mirq[i].flags & HVM_IRQ_DPCI_VALID) )
- continue;
-
pirq_guest_unbind(d, i);
kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
@@ -188,6 +189,7 @@ static void pci_clean_dpci_irqs(struct domain *d)
d->arch.hvm_domain.irq.dpci = NULL;
xfree(hvm_irq_dpci);
}
+ spin_unlock(&d->evtchn_lock);
}
void pci_release_devices(struct domain *d)
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index 62e7d0d5be..71ec004f4a 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -85,37 +85,41 @@ int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
{
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
- struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
+ struct hvm_irq_dpci *dpci = NULL;
struct dev_intx_gsi_link *digl, *tmp;
int i;
ASSERT(isairq < NR_ISAIRQS);
- if ( !vtd_enabled || !dpci ||
- !test_bit(isairq, dpci->isairq_map) )
+ if ( !vtd_enabled)
return;
+ spin_lock(&d->evtchn_lock);
+
+ dpci = domain_get_irq_dpci(d);
+
+ if ( !dpci || !test_bit(isairq, dpci->isairq_map) )
+ {
+ spin_unlock(&d->evtchn_lock);
+ return;
+ }
/* Multiple mirq may be mapped to one isa irq */
- for ( i = 0; i < NR_IRQS; i++ )
+ for ( i = find_first_bit(dpci->mapping, NR_PIRQS);
+ i < NR_PIRQS;
+ i = find_next_bit(dpci->mapping, NR_PIRQS, i + 1) )
{
- if ( !dpci->mirq[i].flags & HVM_IRQ_DPCI_VALID )
- continue;
-
list_for_each_entry_safe ( digl, tmp,
&dpci->mirq[i].digl_list, list )
{
if ( hvm_irq->pci_link.route[digl->link] == isairq )
{
hvm_pci_intx_deassert(d, digl->device, digl->intx);
- spin_lock(&dpci->dirq_lock);
if ( --dpci->mirq[i].pending == 0 )
{
- spin_unlock(&dpci->dirq_lock);
stop_timer(&dpci->hvm_timer[domain_irq_to_vector(d, i)]);
pirq_guest_eoi(d, i);
}
- else
- spin_unlock(&dpci->dirq_lock);
}
}
}
+ spin_unlock(&d->evtchn_lock);
}
diff --git a/xen/include/asm-x86/hvm/irq.h b/xen/include/asm-x86/hvm/irq.h
index 1daa60c244..ed0bf0f4fd 100644
--- a/xen/include/asm-x86/hvm/irq.h
+++ b/xen/include/asm-x86/hvm/irq.h
@@ -25,6 +25,7 @@
#include <xen/types.h>
#include <xen/spinlock.h>
#include <asm/irq.h>
+#include <asm/pirq.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/vioapic.h>
@@ -38,8 +39,6 @@ struct dev_intx_gsi_link {
uint8_t link;
};
-#define HVM_IRQ_DPCI_VALID 0x1
-#define HVM_IRQ_DPCI_MSI 0x2
#define _HVM_IRQ_DPCI_MSI 0x1
struct hvm_gmsi_info {
@@ -64,9 +63,10 @@ struct hvm_girq_dpci_mapping {
#define NR_ISAIRQS 16
#define NR_LINK 4
+/* Protected by domain's evtchn_lock */
struct hvm_irq_dpci {
- spinlock_t dirq_lock;
/* Machine IRQ to guest device/intx mapping. */
+ DECLARE_BITMAP(mapping, NR_PIRQS);
struct hvm_mirq_dpci_mapping mirq[NR_IRQS];
/* Guest IRQ to guest device/intx mapping. */
struct hvm_girq_dpci_mapping girq[NR_IRQS];
diff --git a/xen/include/xen/irq.h b/xen/include/xen/irq.h
index 71f17d6e87..a4dd3f6333 100644
--- a/xen/include/xen/irq.h
+++ b/xen/include/xen/irq.h
@@ -78,6 +78,8 @@ extern int pirq_guest_eoi(struct domain *d, int irq);
extern int pirq_guest_unmask(struct domain *d);
extern int pirq_guest_bind(struct vcpu *v, int irq, int will_share);
extern void pirq_guest_unbind(struct domain *d, int irq);
+extern irq_desc_t *domain_spin_lock_irq_desc(
+ struct domain *d, int irq, unsigned long *pflags);
static inline void set_native_irq_info(int irq, cpumask_t mask)
{