about summary refs log tree commit diff stats
path: root/xen/arch/x86/hvm/irq.c
diff options
context:
space:
mode:
authorkaf24@localhost.localdomain <kaf24@localhost.localdomain>2006-12-26 19:49:11 +0000
committerkaf24@localhost.localdomain <kaf24@localhost.localdomain>2006-12-26 19:49:11 +0000
commit175eac1a6d020149c33d3536068db35fc72f56fe (patch)
treebf53d36d242ac977ba3598e7d036b0dde862e1b9 /xen/arch/x86/hvm/irq.c
parent894243ac7bd572612b6264b2287f7d79ccee7168 (diff)
downloadxen-175eac1a6d020149c33d3536068db35fc72f56fe.tar.gz
xen-175eac1a6d020149c33d3536068db35fc72f56fe.tar.bz2
xen-175eac1a6d020149c33d3536068db35fc72f56fe.zip
[HVM] Route ISA IRQ 0 to IOAPIC GSI 2, just like 99% of native systems.
This is a built-in assumption of HPET 'legacy' IRQ routing, which is why we have changed to this routing strategy now. Signed-off-by: Keir Fraser <keir@xensource.com>
Diffstat (limited to 'xen/arch/x86/hvm/irq.c')
-rw-r--r--xen/arch/x86/hvm/irq.c8
1 file changed, 5 insertions, 3 deletions
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index ce2e11bcca..e9f975eb53 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -85,15 +85,16 @@ void hvm_isa_irq_assert(
struct domain *d, unsigned int isa_irq)
{
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+ unsigned int gsi = (isa_irq == 0) ? 2 : isa_irq;
ASSERT(isa_irq <= 15);
spin_lock(&hvm_irq->lock);
if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq) &&
- (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
+ (hvm_irq->gsi_assert_count[gsi]++ == 0) )
{
- vioapic_irq_positive_edge(d, isa_irq);
+ vioapic_irq_positive_edge(d, gsi);
vpic_irq_positive_edge(d, isa_irq);
}
@@ -104,13 +105,14 @@ void hvm_isa_irq_deassert(
struct domain *d, unsigned int isa_irq)
{
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+ unsigned int gsi = (isa_irq == 0) ? 2 : isa_irq;
ASSERT(isa_irq <= 15);
spin_lock(&hvm_irq->lock);
if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq) &&
- (--hvm_irq->gsi_assert_count[isa_irq] == 0) )
+ (--hvm_irq->gsi_assert_count[gsi] == 0) )
vpic_irq_negative_edge(d, isa_irq);
spin_unlock(&hvm_irq->lock);