-rw-r--r--  xen/arch/x86/domain.c          |  17
-rw-r--r--  xen/arch/x86/hvm/hvm.c         |  34
-rw-r--r--  xen/arch/x86/hvm/irq.c         |  35
-rw-r--r--  xen/arch/x86/irq.c             |  91
-rw-r--r--  xen/arch/x86/physdev.c         | 175
-rw-r--r--  xen/common/event_channel.c     |  30
-rw-r--r--  xen/common/kernel.c            |   3
-rw-r--r--  xen/drivers/passthrough/io.c   |  11
-rw-r--r--  xen/include/asm-x86/domain.h   |   3
-rw-r--r--  xen/include/asm-x86/irq.h      |   7
-rw-r--r--  xen/include/public/features.h  |   3
11 files changed, 337 insertions(+), 72 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index e9ed3e1643..313be27714 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -509,6 +509,19 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
if ( !IO_APIC_IRQ(i) )
d->arch.irq_pirq[i] = d->arch.pirq_irq[i] = i;
+ if ( is_hvm_domain(d) )
+ {
+ d->arch.pirq_emuirq = xmalloc_array(int, d->nr_pirqs);
+ d->arch.emuirq_pirq = xmalloc_array(int, nr_irqs);
+ if ( !d->arch.pirq_emuirq || !d->arch.emuirq_pirq )
+ goto fail;
+ for ( i = 0; i < d->nr_pirqs; i++ )
+ d->arch.pirq_emuirq[i] = IRQ_UNBOUND;
+ for ( i = 0; i < nr_irqs; i++ )
+ d->arch.emuirq_pirq[i] = IRQ_UNBOUND;
+ }
+
if ( (rc = iommu_domain_init(d)) != 0 )
goto fail;
@@ -549,6 +562,8 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
vmce_destroy_msr(d);
xfree(d->arch.pirq_irq);
xfree(d->arch.irq_pirq);
+ xfree(d->arch.pirq_emuirq);
+ xfree(d->arch.emuirq_pirq);
free_xenheap_page(d->shared_info);
if ( paging_initialised )
paging_final_teardown(d);
@@ -600,6 +615,8 @@ void arch_domain_destroy(struct domain *d)
free_xenheap_page(d->shared_info);
xfree(d->arch.pirq_irq);
xfree(d->arch.irq_pirq);
+ xfree(d->arch.pirq_emuirq);
+ xfree(d->arch.emuirq_pirq);
}
unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long guest_cr4)
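
Note: the domain.c hunks above add two per-HVM-domain lookup tables, pirq_emuirq and emuirq_pirq, with every entry initialised to IRQ_UNBOUND (-1) so a missing mapping is distinguishable from a mapping to pirq/irq 0, and both freed on the error and teardown paths. A minimal userspace sketch of the same allocate/initialise/fail pattern (malloc in place of xmalloc_array, table sizes arbitrary):

    #include <stdio.h>
    #include <stdlib.h>

    #define IRQ_UNBOUND (-1)

    struct emuirq_tables {
        int nr_pirqs, nr_irqs;
        int *pirq_emuirq, *emuirq_pirq;
    };

    static int init_emuirq_tables(struct emuirq_tables *t)
    {
        int i;

        t->pirq_emuirq = malloc(t->nr_pirqs * sizeof(int));
        t->emuirq_pirq = malloc(t->nr_irqs * sizeof(int));
        if ( !t->pirq_emuirq || !t->emuirq_pirq )
        {
            /* Mirror the patch's fail path: free both; free(NULL) is a no-op. */
            free(t->pirq_emuirq);
            free(t->emuirq_pirq);
            return -1;
        }
        for ( i = 0; i < t->nr_pirqs; i++ )
            t->pirq_emuirq[i] = IRQ_UNBOUND;
        for ( i = 0; i < t->nr_irqs; i++ )
            t->emuirq_pirq[i] = IRQ_UNBOUND;
        return 0;
    }

    int main(void)
    {
        struct emuirq_tables t = { .nr_pirqs = 256, .nr_irqs = 1024 };

        if ( init_emuirq_tables(&t) == 0 )
            printf("pirq 5 -> emuirq %d (unbound)\n", t.pirq_emuirq[5]);
        free(t.pirq_emuirq);
        free(t.emuirq_pirq);
        return 0;
    }
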
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index bede0ae658..3d94c8736e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2440,6 +2440,20 @@ static long hvm_memory_op(int cmd, XEN_GUEST_HANDLE(void) arg)
return rc;
}
+static long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
+{
+ switch ( cmd )
+ {
+ case PHYSDEVOP_map_pirq:
+ case PHYSDEVOP_unmap_pirq:
+ case PHYSDEVOP_eoi:
+ case PHYSDEVOP_irq_status_query:
+ return do_physdev_op(cmd, arg);
+ default:
+ return -ENOSYS;
+ }
+}
+
static long hvm_vcpu_op(
int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
{
@@ -2475,6 +2489,7 @@ static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
[ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op,
[ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
[ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op,
+ [ __HYPERVISOR_physdev_op ] = (hvm_hypercall_t *)hvm_physdev_op,
HYPERCALL(xen_version),
HYPERCALL(event_channel_op),
HYPERCALL(sched_op),
@@ -2526,10 +2541,28 @@ static long hvm_vcpu_op_compat32(
return rc;
}
+static long hvm_physdev_op_compat32(
+ int cmd, XEN_GUEST_HANDLE(void) arg)
+{
+ switch ( cmd )
+ {
+ case PHYSDEVOP_map_pirq:
+ case PHYSDEVOP_unmap_pirq:
+ case PHYSDEVOP_eoi:
+ case PHYSDEVOP_irq_status_query:
+ return compat_physdev_op(cmd, arg);
+ default:
+ return -ENOSYS;
+ }
+}
+
static hvm_hypercall_t *hvm_hypercall64_table[NR_hypercalls] = {
[ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op,
[ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
[ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op,
+ [ __HYPERVISOR_physdev_op ] = (hvm_hypercall_t *)hvm_physdev_op,
HYPERCALL(xen_version),
HYPERCALL(event_channel_op),
HYPERCALL(sched_op),
@@ -2543,6 +2576,7 @@ static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
[ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op_compat32,
[ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op_compat32,
[ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op_compat32,
+ [ __HYPERVISOR_physdev_op ] = (hvm_hypercall_t *)hvm_physdev_op_compat32,
HYPERCALL(xen_version),
HYPERCALL(event_channel_op),
HYPERCALL(sched_op),
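
Note: hvm.c wires __HYPERVISOR_physdev_op into the HVM hypercall tables, but only through hvm_physdev_op()/hvm_physdev_op_compat32(), which forward four whitelisted PHYSDEVOP_* commands and refuse everything else with -ENOSYS. A standalone sketch of that whitelist-dispatch pattern (command numbers illustrative, not Xen's real PHYSDEVOP_* values):

    #include <errno.h>
    #include <stdio.h>

    enum { OP_MAP_PIRQ, OP_UNMAP_PIRQ, OP_EOI, OP_IRQ_STATUS_QUERY, OP_OTHER };

    static long do_op(int cmd, void *arg)
    {
        (void)cmd; (void)arg;
        return 0;   /* stand-in for do_physdev_op()/compat_physdev_op() */
    }

    static long guarded_physdev_op(int cmd, void *arg)
    {
        switch ( cmd )
        {
        case OP_MAP_PIRQ:
        case OP_UNMAP_PIRQ:
        case OP_EOI:
        case OP_IRQ_STATUS_QUERY:
            return do_op(cmd, arg);   /* whitelisted: forward */
        default:
            return -ENOSYS;           /* everything else is refused */
        }
    }

    int main(void)
    {
        printf("eoi -> %ld, unknown -> %ld\n",
               guarded_physdev_op(OP_EOI, NULL),
               guarded_physdev_op(OP_OTHER, NULL));
        return 0;
    }
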
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index b0ab1a5343..f1deada076 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -23,9 +23,30 @@
#include <xen/types.h>
#include <xen/event.h>
#include <xen/sched.h>
+#include <xen/irq.h>
#include <asm/hvm/domain.h>
#include <asm/hvm/support.h>
+/* Must be called with hvm_domain->irq_lock held */
+static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq)
+{
+ int pirq = domain_emuirq_to_pirq(d, ioapic_gsi);
+ if ( pirq != IRQ_UNBOUND )
+ {
+ send_guest_pirq(d, pirq);
+ return;
+ }
+ vioapic_irq_positive_edge(d, ioapic_gsi);
+ vpic_irq_positive_edge(d, pic_irq);
+}
+
+/* Must be called with hvm_domain->irq_lock held */
+static void deassert_irq(struct domain *d, unsigned isa_irq)
+{
+ if ( domain_emuirq_to_pirq(d, isa_irq) != IRQ_UNBOUND )
+ vpic_irq_negative_edge(d, isa_irq);
+}
+
static void __hvm_pci_intx_assert(
struct domain *d, unsigned int device, unsigned int intx)
{
@@ -45,10 +66,7 @@ static void __hvm_pci_intx_assert(
isa_irq = hvm_irq->pci_link.route[link];
if ( (hvm_irq->pci_link_assert_count[link]++ == 0) && isa_irq &&
(hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
- {
- vioapic_irq_positive_edge(d, isa_irq);
- vpic_irq_positive_edge(d, isa_irq);
- }
+ assert_irq(d, isa_irq, isa_irq);
}
void hvm_pci_intx_assert(
@@ -77,7 +95,7 @@ static void __hvm_pci_intx_deassert(
isa_irq = hvm_irq->pci_link.route[link];
if ( (--hvm_irq->pci_link_assert_count[link] == 0) && isa_irq &&
(--hvm_irq->gsi_assert_count[isa_irq] == 0) )
- vpic_irq_negative_edge(d, isa_irq);
+ deassert_irq(d, isa_irq);
}
void hvm_pci_intx_deassert(
@@ -100,10 +118,7 @@ void hvm_isa_irq_assert(
if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
(hvm_irq->gsi_assert_count[gsi]++ == 0) )
- {
- vioapic_irq_positive_edge(d, gsi);
- vpic_irq_positive_edge(d, isa_irq);
- }
+ assert_irq(d, gsi, isa_irq);
spin_unlock(&d->arch.hvm_domain.irq_lock);
}
@@ -120,7 +135,7 @@ void hvm_isa_irq_deassert(
if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq.i) &&
(--hvm_irq->gsi_assert_count[gsi] == 0) )
- vpic_irq_negative_edge(d, isa_irq);
+ deassert_irq(d, isa_irq);
spin_unlock(&d->arch.hvm_domain.irq_lock);
}
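
Note: the new assert_irq() checks whether the emulated GSI has a pirq bound; if so, the interrupt is delivered as an event (send_guest_pirq) and the emulated IOAPIC/PIC edge is skipped entirely. A compilable model of that decision, with stubs standing in for the Xen internals:

    #include <stdio.h>

    #define IRQ_UNBOUND (-1)
    #define NR_GSIS 16

    static int emuirq_pirq[NR_GSIS];

    static void send_guest_pirq_stub(int pirq) { printf("event channel, pirq %d\n", pirq); }
    static void vioapic_edge_stub(int gsi)     { printf("vioapic, gsi %d\n", gsi); }
    static void vpic_edge_stub(int irq)        { printf("vpic, irq %d\n", irq); }

    static void assert_irq(int ioapic_gsi, int pic_irq)
    {
        int pirq = emuirq_pirq[ioapic_gsi];

        if ( pirq != IRQ_UNBOUND )
        {
            /* The guest bound an event channel: bypass the emulated PICs. */
            send_guest_pirq_stub(pirq);
            return;
        }
        vioapic_edge_stub(ioapic_gsi);
        vpic_edge_stub(pic_irq);
    }

    int main(void)
    {
        int i;

        for ( i = 0; i < NR_GSIS; i++ )
            emuirq_pirq[i] = IRQ_UNBOUND;
        assert_irq(9, 9);        /* unbound: emulated IOAPIC + PIC path */
        emuirq_pirq[9] = 3;
        assert_irq(9, 9);        /* bound: event-channel path */
        return 0;
    }
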
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index ab7d9bfb11..ec74698c36 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -1453,7 +1453,11 @@ int get_free_pirq(struct domain *d, int type, int index)
{
for ( i = 16; i < nr_irqs_gsi; i++ )
if ( !d->arch.pirq_irq[i] )
- break;
+ {
+ if ( !is_hvm_domain(d) ||
+ d->arch.pirq_emuirq[i] == IRQ_UNBOUND )
+ break;
+ }
if ( i == nr_irqs_gsi )
return -ENOSPC;
}
@@ -1461,7 +1465,11 @@ int get_free_pirq(struct domain *d, int type, int index)
{
for ( i = d->nr_pirqs - 1; i >= nr_irqs_gsi; i-- )
if ( !d->arch.pirq_irq[i] )
- break;
+ {
+ if ( !is_hvm_domain(d) ||
+ d->arch.pirq_emuirq[i] == IRQ_UNBOUND )
+ break;
+ }
if ( i < nr_irqs_gsi )
return -ENOSPC;
}
@@ -1792,3 +1800,82 @@ void fixup_irqs(void)
peoi[sp].ready = 1;
flush_ready_eoi();
}
+
+int map_domain_emuirq_pirq(struct domain *d, int pirq, int emuirq)
+{
+ int old_emuirq = IRQ_UNBOUND, old_pirq = IRQ_UNBOUND;
+
+ ASSERT(spin_is_locked(&d->event_lock));
+
+ if ( !is_hvm_domain(d) )
+ return -EINVAL;
+
+ if ( pirq < 0 || pirq >= d->nr_pirqs ||
+ emuirq == IRQ_UNBOUND || emuirq >= (int) nr_irqs )
+ {
+ dprintk(XENLOG_G_ERR, "dom%d: invalid pirq %d or emuirq %d\n",
+ d->domain_id, pirq, emuirq);
+ return -EINVAL;
+ }
+
+ old_emuirq = domain_pirq_to_emuirq(d, pirq);
+ if ( emuirq != IRQ_PT )
+ old_pirq = domain_emuirq_to_pirq(d, emuirq);
+
+ if ( (old_emuirq != IRQ_UNBOUND && (old_emuirq != emuirq) ) ||
+ (old_pirq != IRQ_UNBOUND && (old_pirq != pirq)) )
+ {
+ dprintk(XENLOG_G_WARNING, "dom%d: pirq %d or emuirq %d already mapped\n",
+ d->domain_id, pirq, emuirq);
+ return 0;
+ }
+
+ d->arch.pirq_emuirq[pirq] = emuirq;
+ /* do not store emuirq mappings for pt devices */
+ if ( emuirq != IRQ_PT )
+ d->arch.emuirq_pirq[emuirq] = pirq;
+
+ return 0;
+}
+
+int unmap_domain_pirq_emuirq(struct domain *d, int pirq)
+{
+ int emuirq, ret = 0;
+
+ if ( !is_hvm_domain(d) )
+ return -EINVAL;
+
+ if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
+ return -EINVAL;
+
+ ASSERT(spin_is_locked(&d->event_lock));
+
+ emuirq = domain_pirq_to_emuirq(d, pirq);
+ if ( emuirq == IRQ_UNBOUND )
+ {
+ dprintk(XENLOG_G_ERR, "dom%d: pirq %d not mapped\n",
+ d->domain_id, pirq);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ d->arch.pirq_emuirq[pirq] = IRQ_UNBOUND;
+ /* pt entries have no reverse mapping (emuirq == IRQ_PT would index out of bounds) */
+ if ( emuirq != IRQ_PT )
+ d->arch.emuirq_pirq[emuirq] = IRQ_UNBOUND;
+
+ done:
+ return ret;
+}
+
+int hvm_domain_use_pirq(struct domain *d, int pirq)
+{
+ int emuirq;
+
+ if ( !is_hvm_domain(d) )
+ return 0;
+
+ emuirq = domain_pirq_to_emuirq(d, pirq);
+ if ( emuirq != IRQ_UNBOUND && d->pirq_to_evtchn[pirq] != 0 )
+ return 1;
+ else
+ return 0;
+}
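
Note: map_domain_emuirq_pirq() always records the forward pirq -> emuirq entry but deliberately skips the reverse entry when emuirq is the shared IRQ_PT marker ("do not store emuirq mappings for pt devices"), since many passthrough pirqs may carry it at once; unmap must honour the same asymmetry. A self-contained model of that bookkeeping (table sizes arbitrary):

    #include <stdio.h>

    #define IRQ_UNBOUND (-1)
    #define IRQ_PT      (-2)
    #define NR          32

    static int pirq_emuirq[NR], emuirq_pirq[NR];

    static int map_emuirq_pirq(int pirq, int emuirq)
    {
        if ( pirq < 0 || pirq >= NR || emuirq == IRQ_UNBOUND || emuirq >= NR )
            return -1;
        pirq_emuirq[pirq] = emuirq;
        if ( emuirq != IRQ_PT )           /* no reverse entry for passthrough */
            emuirq_pirq[emuirq] = pirq;
        return 0;
    }

    static int unmap_pirq_emuirq(int pirq)
    {
        int emuirq = pirq_emuirq[pirq];

        if ( emuirq == IRQ_UNBOUND )
            return -1;
        pirq_emuirq[pirq] = IRQ_UNBOUND;
        if ( emuirq != IRQ_PT )           /* same asymmetry on teardown */
            emuirq_pirq[emuirq] = IRQ_UNBOUND;
        return 0;
    }

    int main(void)
    {
        int i;

        for ( i = 0; i < NR; i++ )
            pirq_emuirq[i] = emuirq_pirq[i] = IRQ_UNBOUND;
        map_emuirq_pirq(4, 9);        /* emulated device: both directions */
        map_emuirq_pirq(5, IRQ_PT);   /* passthrough: forward entry only */
        printf("pirq4->%d emuirq9->%d pirq5->%d\n",
               pirq_emuirq[4], emuirq_pirq[9], pirq_emuirq[5]);
        unmap_pirq_emuirq(5);         /* safe: guard skips emuirq_pirq[-2] */
        return 0;
    }
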
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index 7f8e71a28b..453b4f3006 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -27,6 +27,59 @@ int
ioapic_guest_write(
unsigned long physbase, unsigned int reg, u32 pval);
+static int physdev_hvm_map_pirq(
+ struct domain *d, struct physdev_map_pirq *map)
+{
+ int pirq, ret = 0;
+
+ spin_lock(&d->event_lock);
+ switch ( map->type )
+ {
+ case MAP_PIRQ_TYPE_GSI: {
+ struct hvm_irq_dpci *hvm_irq_dpci;
+ struct hvm_girq_dpci_mapping *girq;
+ uint32_t machine_gsi = 0;
+
+ /* find the machine gsi corresponding to the
+ * emulated gsi */
+ hvm_irq_dpci = domain_get_irq_dpci(d);
+ if ( hvm_irq_dpci )
+ {
+ list_for_each_entry ( girq,
+ &hvm_irq_dpci->girq[map->index],
+ list )
+ machine_gsi = girq->machine_gsi;
+ }
+ /* found one, this means we are dealing with a pt device */
+ if ( machine_gsi )
+ {
+ map->index = domain_pirq_to_irq(d, machine_gsi);
+ pirq = machine_gsi;
+ ret = (pirq > 0) ? 0 : pirq;
+ }
+ /* we didn't find any, this means we are dealing
+ * with an emulated device */
+ else
+ {
+ pirq = map->pirq;
+ if ( pirq < 0 )
+ pirq = get_free_pirq(d, map->type, map->index);
+ ret = map_domain_emuirq_pirq(d, pirq, map->index);
+ }
+ map->pirq = pirq;
+ break;
+ }
+
+ default:
+ ret = -EINVAL;
+ dprintk(XENLOG_G_WARNING, "map type %d not supported yet\n", map->type);
+ break;
+ }
+
+ spin_unlock(&d->event_lock);
+ return ret;
+}
+
static int physdev_map_pirq(struct physdev_map_pirq *map)
{
struct domain *d;
@@ -43,6 +96,12 @@ static int physdev_map_pirq(struct physdev_map_pirq *map)
if ( d == NULL )
return -ESRCH;
+ if ( map->domid == DOMID_SELF && is_hvm_domain(d) )
+ {
+ ret = physdev_hvm_map_pirq(d, map);
+ goto free_domain;
+ }
+
if ( !IS_PRIV_FOR(current->domain, d) )
{
ret = -EPERM;
@@ -52,55 +111,55 @@ static int physdev_map_pirq(struct physdev_map_pirq *map)
/* Verify or get irq. */
switch ( map->type )
{
- case MAP_PIRQ_TYPE_GSI:
- if ( map->index < 0 || map->index >= nr_irqs_gsi )
- {
- dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
- d->domain_id, map->index);
- ret = -EINVAL;
- goto free_domain;
- }
-
- irq = domain_pirq_to_irq(current->domain, map->index);
- if ( !irq )
- {
- if ( IS_PRIV(current->domain) )
- irq = map->index;
- else {
- dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
- d->domain_id);
- ret = -EINVAL;
- goto free_domain;
- }
- }
- break;
-
- case MAP_PIRQ_TYPE_MSI:
- irq = map->index;
- if ( irq == -1 )
- irq = create_irq();
+ case MAP_PIRQ_TYPE_GSI:
+ if ( map->index < 0 || map->index >= nr_irqs_gsi )
+ {
+ dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
+ d->domain_id, map->index);
+ ret = -EINVAL;
+ goto free_domain;
+ }
- if ( irq < 0 || irq >= nr_irqs )
- {
- dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
+ irq = domain_pirq_to_irq(current->domain, map->index);
+ if ( !irq )
+ {
+ if ( IS_PRIV(current->domain) )
+ irq = map->index;
+ else {
+ dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
d->domain_id);
ret = -EINVAL;
goto free_domain;
}
+ }
+ break;
- _msi.bus = map->bus;
- _msi.devfn = map->devfn;
- _msi.entry_nr = map->entry_nr;
- _msi.table_base = map->table_base;
- _msi.irq = irq;
- map_data = &_msi;
- break;
+ case MAP_PIRQ_TYPE_MSI:
+ irq = map->index;
+ if ( irq == -1 )
+ irq = create_irq();
- default:
- dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
- d->domain_id, map->type);
+ if ( irq < 0 || irq >= nr_irqs )
+ {
+ dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
+ d->domain_id);
ret = -EINVAL;
goto free_domain;
+ }
+
+ _msi.bus = map->bus;
+ _msi.devfn = map->devfn;
+ _msi.entry_nr = map->entry_nr;
+ _msi.table_base = map->table_base;
+ _msi.irq = irq;
+ map_data = &_msi;
+ break;
+
+ default:
+ dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
+ d->domain_id, map->type);
+ ret = -EINVAL;
+ goto free_domain;
}
spin_lock(&pcidevs_lock);
@@ -148,12 +207,15 @@ static int physdev_map_pirq(struct physdev_map_pirq *map)
if ( ret == 0 )
map->pirq = pirq;
-done:
+ if ( !ret && is_hvm_domain(d) )
+ map_domain_emuirq_pirq(d, pirq, IRQ_PT);
+
+ done:
spin_unlock(&d->event_lock);
spin_unlock(&pcidevs_lock);
if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
destroy_irq(irq);
-free_domain:
+ free_domain:
rcu_unlock_domain(d);
return ret;
}
@@ -169,6 +231,14 @@ static int physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
if ( d == NULL )
return -ESRCH;
+ if ( is_hvm_domain(d) )
+ {
+ spin_lock(&d->event_lock);
+ ret = unmap_domain_pirq_emuirq(d, unmap->pirq);
+ spin_unlock(&d->event_lock);
+ goto free_domain;
+ }
+
ret = -EPERM;
if ( !IS_PRIV_FOR(current->domain, d) )
goto free_domain;
@@ -179,7 +249,7 @@ static int physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
spin_unlock(&d->event_lock);
spin_unlock(&pcidevs_lock);
-free_domain:
+ free_domain:
rcu_unlock_domain(d);
return ret;
}
@@ -202,7 +272,11 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
break;
if ( v->domain->arch.pirq_eoi_map )
evtchn_unmask(v->domain->pirq_to_evtchn[eoi.irq]);
- ret = pirq_guest_eoi(v->domain, eoi.irq);
+ if ( !is_hvm_domain(v->domain) ||
+ domain_pirq_to_emuirq(v->domain, eoi.irq) == IRQ_PT )
+ ret = pirq_guest_eoi(v->domain, eoi.irq);
+ else
+ ret = 0;
break;
}
@@ -257,6 +331,13 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
if ( (irq < 0) || (irq >= v->domain->nr_pirqs) )
break;
irq_status_query.flags = 0;
+ if ( is_hvm_domain(v->domain) &&
+ domain_pirq_to_emuirq(v->domain, irq) != IRQ_PT )
+ {
+ ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
+ break;
+ }
+
/*
* Even edge-triggered or message-based IRQs can need masking from
* time to time. If the guest is not dynamically checking for this
@@ -345,9 +426,9 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
break;
/* Vector is only used by hypervisor, and dom0 shouldn't
- touch it in its world, return irq_op.irq as the vecotr,
- and make this hypercall dummy, and also defer the vector
- allocation when dom0 tries to programe ioapic entry. */
+ touch it in its world, return irq_op.irq as the vector,
+ and make this hypercall a no-op, and also defer the vector
+ allocation until dom0 tries to program the ioapic entry. */
irq_op.vector = irq_op.irq;
ret = 0;
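
Note: physdev_hvm_map_pirq() is what an HVM guest reaches when it issues PHYSDEVOP_map_pirq on itself: for MAP_PIRQ_TYPE_GSI it either reuses the machine GSI of a passthrough device or allocates a free pirq and records an emuirq mapping. A guest-side sketch of that call with the hypercall wrapper stubbed; the struct layout and PHYSDEVOP_map_pirq number are modelled on Xen's public physdev.h:

    #include <stdint.h>
    #include <stdio.h>

    #define DOMID_SELF        0x7FF0U
    #define MAP_PIRQ_TYPE_GSI 0x1

    struct physdev_map_pirq {
        uint16_t domid;
        int type;
        int index;       /* IN: GSI for MAP_PIRQ_TYPE_GSI */
        int pirq;        /* IN: -1 = allocate; OUT: chosen pirq */
        int bus, devfn, entry_nr;
        uint64_t table_base;
    };

    static int HYPERVISOR_physdev_op(int cmd, void *arg)   /* stub */
    {
        (void)cmd;
        ((struct physdev_map_pirq *)arg)->pirq = 7;        /* pretend result */
        return 0;
    }

    int main(void)
    {
        struct physdev_map_pirq map = {
            .domid = DOMID_SELF,
            .type  = MAP_PIRQ_TYPE_GSI,
            .index = 9,      /* emulated GSI, e.g. the ACPI SCI */
            .pirq  = -1,     /* let Xen pick a free pirq */
        };

        if ( HYPERVISOR_physdev_op(/* PHYSDEVOP_map_pirq */ 13, &map) == 0 )
            printf("gsi 9 mapped to pirq %d\n", map.pirq);
        return 0;
    }
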
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index f4a2c24d7a..52ec7fc215 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -331,7 +331,7 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
return -EINVAL;
- if ( !irq_access_permitted(d, pirq) )
+ if ( !is_hvm_domain(d) && !irq_access_permitted(d, pirq) )
return -EPERM;
spin_lock(&d->event_lock);
@@ -345,12 +345,15 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
chn = evtchn_from_port(d, port);
d->pirq_to_evtchn[pirq] = port;
- rc = pirq_guest_bind(v, pirq,
- !!(bind->flags & BIND_PIRQ__WILL_SHARE));
- if ( rc != 0 )
+ if ( !is_hvm_domain(d) )
{
- d->pirq_to_evtchn[pirq] = 0;
- goto out;
+ rc = pirq_guest_bind(
+ v, pirq, !!(bind->flags & BIND_PIRQ__WILL_SHARE));
+ if ( rc != 0 )
+ {
+ d->pirq_to_evtchn[pirq] = 0;
+ goto out;
+ }
}
chn->state = ECS_PIRQ;
@@ -403,7 +406,8 @@ static long __evtchn_close(struct domain *d1, int port1)
break;
case ECS_PIRQ:
- pirq_guest_unbind(d1, chn1->u.pirq.irq);
+ if ( !is_hvm_domain(d1) )
+ pirq_guest_unbind(d1, chn1->u.pirq.irq);
d1->pirq_to_evtchn[chn1->u.pirq.irq] = 0;
unlink_pirq_port(chn1, d1->vcpu[chn1->notify_vcpu_id]);
break;
@@ -662,10 +666,16 @@ int send_guest_pirq(struct domain *d, int pirq)
struct evtchn *chn;
/*
- * It should not be possible to race with __evtchn_close():
- * The caller of this function must synchronise with pirq_guest_unbind().
+ * PV guests: It should not be possible to race with __evtchn_close(). The
+ * caller of this function must synchronise with pirq_guest_unbind().
+ * HVM guests: Port is legitimately zero when the guest disables the
+ * emulated interrupt/evtchn.
*/
- ASSERT(port != 0);
+ if ( port == 0 )
+ {
+ BUG_ON(!is_hvm_domain(d));
+ return 0;
+ }
chn = evtchn_from_port(d, port);
return evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
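
Note: with pirq_guest_bind()/pirq_guest_unbind() skipped for HVM domains, evtchn_bind_pirq now merely wires the pirq to a port, and send_guest_pirq() tolerates port 0 for an interrupt the guest has disabled. A guest-side sketch of the bind step; the hypercall is stubbed and the struct and EVTCHNOP_bind_pirq number are modelled on Xen's public event_channel.h:

    #include <stdint.h>
    #include <stdio.h>

    #define BIND_PIRQ__WILL_SHARE 1

    struct evtchn_bind_pirq {
        uint32_t pirq;    /* IN */
        uint32_t flags;   /* IN: BIND_PIRQ__WILL_SHARE */
        uint32_t port;    /* OUT: allocated event-channel port */
    };

    static int HYPERVISOR_event_channel_op(int cmd, void *arg)   /* stub */
    {
        (void)cmd;
        ((struct evtchn_bind_pirq *)arg)->port = 11;             /* pretend result */
        return 0;
    }

    int main(void)
    {
        struct evtchn_bind_pirq bind = { .pirq = 7, .flags = 0 };

        if ( HYPERVISOR_event_channel_op(/* EVTCHNOP_bind_pirq */ 2, &bind) == 0 )
            printf("pirq %u bound to evtchn port %u\n", bind.pirq, bind.port);
        return 0;
    }
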
diff --git a/xen/common/kernel.c b/xen/common/kernel.c
index 0bc954c1aa..2a1aabfc60 100644
--- a/xen/common/kernel.c
+++ b/xen/common/kernel.c
@@ -277,7 +277,8 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDLE(void) arg)
(1U << XENFEAT_gnttab_map_avail_bits);
else
fi.submap |= (1U << XENFEAT_hvm_safe_pvclock) |
- (1U << XENFEAT_hvm_callback_vector);
+ (1U << XENFEAT_hvm_callback_vector) |
+ (1U << XENFEAT_hvm_pirqs);
#endif
break;
default:
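
Note: kernel.c advertises the new capability to HVM guests through the features submap. A guest-side sketch of probing it via XENVER_get_features (hypercall stubbed; struct and command number modelled on Xen's public version.h):

    #include <stdint.h>
    #include <stdio.h>

    #define XENFEAT_hvm_pirqs 10

    struct xen_feature_info {
        unsigned int submap_idx;   /* IN: which 32-bit submap */
        uint32_t submap;           /* OUT: 32 feature bits */
    };

    static int HYPERVISOR_xen_version(int cmd, void *arg)   /* stub */
    {
        (void)cmd;
        ((struct xen_feature_info *)arg)->submap = 1U << XENFEAT_hvm_pirqs;
        return 0;
    }

    int main(void)
    {
        struct xen_feature_info fi = { .submap_idx = XENFEAT_hvm_pirqs / 32 };

        if ( HYPERVISOR_xen_version(/* XENVER_get_features */ 6, &fi) == 0 &&
             (fi.submap & (1U << (XENFEAT_hvm_pirqs % 32))) )
            printf("hypervisor supports HVM pirqs\n");
        return 0;
    }
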
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index 8e3af44d19..6e85b6b654 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -375,6 +375,7 @@ int pt_irq_destroy_bind_vtd(
hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
hvm_irq_dpci->mirq[machine_gsi].flags = 0;
clear_bit(machine_gsi, hvm_irq_dpci->mapping);
+ unmap_domain_pirq_emuirq(d, machine_gsi);
}
}
spin_unlock(&d->event_lock);
@@ -454,7 +455,10 @@ void hvm_dpci_msi_eoi(struct domain *d, int vector)
extern int vmsi_deliver(struct domain *d, int pirq);
static int hvm_pci_msi_assert(struct domain *d, int pirq)
{
- return vmsi_deliver(d, pirq);
+ if ( hvm_domain_use_pirq(d, pirq) )
+ return send_guest_pirq(d, pirq);
+ else
+ return vmsi_deliver(d, pirq);
}
#endif
@@ -488,7 +492,10 @@ static void hvm_dirq_assist(unsigned long _d)
{
device = digl->device;
intx = digl->intx;
- hvm_pci_intx_assert(d, device, intx);
+ if ( hvm_domain_use_pirq(d, pirq) )
+ send_guest_pirq(d, pirq);
+ else
+ hvm_pci_intx_assert(d, device, intx);
hvm_irq_dpci->mirq[pirq].pending++;
#ifdef SUPPORT_MSI_REMAPPING
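
Note: io.c routes passthrough device interrupts through hvm_domain_use_pirq(): delivery goes over the event channel only when the pirq has an emuirq mapping and the guest actually bound an event channel to it; otherwise the emulated INTx/MSI injection path is kept. A minimal model of that gate (tables and stubs illustrative):

    #include <stdio.h>

    #define IRQ_UNBOUND (-1)
    #define NR          16

    static int pirq_emuirq[NR];
    static int pirq_to_evtchn[NR];

    static int domain_use_pirq(int pirq)
    {
        return pirq_emuirq[pirq] != IRQ_UNBOUND && pirq_to_evtchn[pirq] != 0;
    }

    static void deliver(int pirq)
    {
        if ( domain_use_pirq(pirq) )
            printf("pirq %d: send_guest_pirq (event channel)\n", pirq);
        else
            printf("pirq %d: emulated intx/msi injection\n", pirq);
    }

    int main(void)
    {
        int i;

        for ( i = 0; i < NR; i++ )
            pirq_emuirq[i] = IRQ_UNBOUND;
        deliver(3);                /* no mapping: emulated path */
        pirq_emuirq[3] = 3;
        pirq_to_evtchn[3] = 11;
        deliver(3);                /* mapped and bound: evtchn path */
        return 0;
    }
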
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index dc77780888..eaba32d1be 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -261,6 +261,9 @@ struct arch_domain
/* NB. protected by d->event_lock and by irq_desc[irq].lock */
int *irq_pirq;
int *pirq_irq;
+ /* pirq to emulated irq and vice versa */
+ int *emuirq_pirq;
+ int *pirq_emuirq;
/* Shared page for notifying that explicit PIRQ EOI is required. */
unsigned long *pirq_eoi_map;
diff --git a/xen/include/asm-x86/irq.h b/xen/include/asm-x86/irq.h
index eb069ffce1..078aa537c0 100644
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -114,6 +114,9 @@ int map_domain_pirq(struct domain *d, int pirq, int irq, int type,
int unmap_domain_pirq(struct domain *d, int pirq);
int get_free_pirq(struct domain *d, int type, int index);
void free_domain_pirqs(struct domain *d);
+int map_domain_emuirq_pirq(struct domain *d, int pirq, int emuirq);
+int unmap_domain_pirq_emuirq(struct domain *d, int pirq);
+int hvm_domain_use_pirq(struct domain *d, int pirq);
int init_irq_data(void);
@@ -147,6 +150,10 @@ void irq_set_affinity(struct irq_desc *, const cpumask_t *mask);
#define domain_pirq_to_irq(d, pirq) ((d)->arch.pirq_irq[pirq])
#define domain_irq_to_pirq(d, irq) ((d)->arch.irq_pirq[irq])
+#define domain_pirq_to_emuirq(d, pirq) ((d)->arch.pirq_emuirq[pirq])
+#define domain_emuirq_to_pirq(d, emuirq) ((d)->arch.emuirq_pirq[emuirq])
+#define IRQ_UNBOUND (-1)
+#define IRQ_PT (-2)
bool_t cpu_has_pending_apic_eoi(void);
diff --git a/xen/include/public/features.h b/xen/include/public/features.h
index fef7901294..0e3c486249 100644
--- a/xen/include/public/features.h
+++ b/xen/include/public/features.h
@@ -74,6 +74,9 @@
/* x86: pvclock algorithm is safe to use on HVM */
#define XENFEAT_hvm_safe_pvclock 9
+/* x86: pirq can be used by HVM guests */
+#define XENFEAT_hvm_pirqs 10
+
#define XENFEAT_NR_SUBMAPS 1
#endif /* __XEN_PUBLIC_FEATURES_H__ */