aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKeir Fraser <keir.fraser@citrix.com>2010-07-15 10:43:19 +0100
committerKeir Fraser <keir.fraser@citrix.com>2010-07-15 10:43:19 +0100
commit85bbd10602dd33a6b4482ec8c9d504fd9baedd8c (patch)
tree75ef1029df4916fa654d525aafb57295af9e9298
parentcfa851ce72575c9974be6ff67d261b3fa740e79a (diff)
downloadxen-85bbd10602dd33a6b4482ec8c9d504fd9baedd8c.tar.gz
xen-85bbd10602dd33a6b4482ec8c9d504fd9baedd8c.tar.bz2
xen-85bbd10602dd33a6b4482ec8c9d504fd9baedd8c.zip
x2APIC/VT-d: improve interrupt remapping and queued invalidation enabling and disabling
x2APIC depends on interrupt remapping, so interrupt remapping needs to be enabled before x2APIC. Usually x2APIC is not yet enabled (x2apic_enabled=0) at the time interrupt remapping is enabled, even though x2APIC will be enabled later. Therefore the interrupt mode must be passed as a parameter to intremap_enable instead of being derived from x2apic_enabled. This patch adds a parameter "eim" to intremap_enable to achieve that. Interrupt remapping and queued invalidation are already enabled when x2APIC is enabled, so they need not be enabled again during IOMMU setup. This patch checks whether interrupt remapping and queued invalidation are already enabled, and does not enable them again if they are. It does the same when disabling: it does not disable them if they are already disabled. Signed-off-by: Weidong Han <weidong.han@intel.com> xen-unstable changeset: 21717:176956d1d2fd xen-unstable date: Mon Jul 05 08:30:25 2010 +0100
-rw-r--r--xen/drivers/passthrough/vtd/extern.h2
-rw-r--r--xen/drivers/passthrough/vtd/intremap.c28
-rw-r--r--xen/drivers/passthrough/vtd/iommu.c28
-rw-r--r--xen/drivers/passthrough/vtd/qinval.c15
4 files changed, 50 insertions, 23 deletions
diff --git a/xen/drivers/passthrough/vtd/extern.h b/xen/drivers/passthrough/vtd/extern.h
index 4f75f3f749..15134d306a 100644
--- a/xen/drivers/passthrough/vtd/extern.h
+++ b/xen/drivers/passthrough/vtd/extern.h
@@ -33,7 +33,7 @@ extern struct keyhandler dump_iommu_info_keyhandler;
int enable_qinval(struct iommu *iommu);
void disable_qinval(struct iommu *iommu);
-int enable_intremap(struct iommu *iommu);
+int enable_intremap(struct iommu *iommu, int eim);
void disable_intremap(struct iommu *iommu);
int queue_invalidate_context(struct iommu *iommu,
u16 did, u16 source_id, u8 function_mask, u8 granu);
diff --git a/xen/drivers/passthrough/vtd/intremap.c b/xen/drivers/passthrough/vtd/intremap.c
index ad6008957c..d3bb96a8a9 100644
--- a/xen/drivers/passthrough/vtd/intremap.c
+++ b/xen/drivers/passthrough/vtd/intremap.c
@@ -709,7 +709,7 @@ void msi_msg_write_remap_rte(
}
#endif
-int enable_intremap(struct iommu *iommu)
+int enable_intremap(struct iommu *iommu, int eim)
{
struct acpi_drhd_unit *drhd;
struct ir_ctrl *ir_ctrl;
@@ -719,10 +719,25 @@ int enable_intremap(struct iommu *iommu)
ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
ir_ctrl = iommu_ir_ctrl(iommu);
+ sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+
+ /* Return if already enabled by Xen */
+ if ( (sts & DMA_GSTS_IRES) && ir_ctrl->iremap_maddr )
+ return 0;
+
+ sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+ if ( !(sts & DMA_GSTS_QIES) )
+ {
+ dprintk(XENLOG_ERR VTDPREFIX,
+ "Queued invalidation is not enabled, should not enable "
+ "interrupt remapping\n");
+ return -EINVAL;
+ }
+
if ( ir_ctrl->iremap_maddr == 0 )
{
drhd = iommu_to_drhd(iommu);
- ir_ctrl->iremap_maddr = alloc_pgtable_maddr(drhd, IREMAP_ARCH_PAGE_NR );
+ ir_ctrl->iremap_maddr = alloc_pgtable_maddr(drhd, IREMAP_ARCH_PAGE_NR);
if ( ir_ctrl->iremap_maddr == 0 )
{
dprintk(XENLOG_WARNING VTDPREFIX,
@@ -735,7 +750,7 @@ int enable_intremap(struct iommu *iommu)
#ifdef CONFIG_X86
/* set extended interrupt mode bit */
ir_ctrl->iremap_maddr |=
- x2apic_enabled ? (1 << IRTA_REG_EIME_SHIFT) : 0;
+ eim ? (1 << IRTA_REG_EIME_SHIFT) : 0;
#endif
spin_lock_irqsave(&iommu->register_lock, flags);
@@ -772,13 +787,18 @@ void disable_intremap(struct iommu *iommu)
u32 sts;
unsigned long flags;
- ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
+ if ( !ecap_intr_remap(iommu->ecap) )
+ return;
spin_lock_irqsave(&iommu->register_lock, flags);
sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+ if ( !(sts & DMA_GSTS_IRES) )
+ goto out;
+
dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_IRE));
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
!(sts & DMA_GSTS_IRES), sts);
+out:
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 709833150f..f2f0d8ce05 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1829,24 +1829,20 @@ static int init_vtd_hw(void)
spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
spin_unlock_irqrestore(&iommu->register_lock, flags);
-
- /* initialize flush functions */
- flush = iommu_get_flush(iommu);
- flush->context = flush_context_reg;
- flush->iotlb = flush_iotlb_reg;
}
- if ( iommu_qinval )
+ for_each_drhd_unit ( drhd )
{
- for_each_drhd_unit ( drhd )
+ iommu = drhd->iommu;
+ /*
+     * If queued invalidation not enabled, use register based
+ * invalidation
+ */
+ if ( enable_qinval(iommu) != 0 )
{
- iommu = drhd->iommu;
- if ( enable_qinval(iommu) != 0 )
- {
- dprintk(XENLOG_INFO VTDPREFIX,
- "Failed to enable Queued Invalidation!\n");
- break;
- }
+ flush = iommu_get_flush(iommu);
+ flush->context = flush_context_reg;
+ flush->iotlb = flush_iotlb_reg;
}
}
@@ -1872,9 +1868,9 @@ static int init_vtd_hw(void)
for_each_drhd_unit ( drhd )
{
iommu = drhd->iommu;
- if ( enable_intremap(iommu) != 0 )
+ if ( enable_intremap(iommu, 0) != 0 )
{
- dprintk(XENLOG_INFO VTDPREFIX,
+ dprintk(XENLOG_WARNING VTDPREFIX,
"Failed to enable Interrupt Remapping!\n");
break;
}
diff --git a/xen/drivers/passthrough/vtd/qinval.c b/xen/drivers/passthrough/vtd/qinval.c
index 7dde42b302..a1ac3ee7ba 100644
--- a/xen/drivers/passthrough/vtd/qinval.c
+++ b/xen/drivers/passthrough/vtd/qinval.c
@@ -437,10 +437,16 @@ int enable_qinval(struct iommu *iommu)
u32 sts;
unsigned long flags;
+ if ( !ecap_queued_inval(iommu->ecap) || !iommu_qinval )
+ return -ENOENT;
+
qi_ctrl = iommu_qi_ctrl(iommu);
flush = iommu_get_flush(iommu);
- ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval);
+ /* Return if already enabled by Xen */
+ sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+ if ( (sts & DMA_GSTS_QIES) && qi_ctrl->qinval_maddr )
+ return 0;
if ( qi_ctrl->qinval_maddr == 0 )
{
@@ -488,14 +494,19 @@ void disable_qinval(struct iommu *iommu)
u32 sts;
unsigned long flags;
- ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval);
+ if ( !ecap_queued_inval(iommu->ecap) )
+ return;
spin_lock_irqsave(&iommu->register_lock, flags);
sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+ if ( !(sts & DMA_GSTS_QIES) )
+ goto out;
+
dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_QIE));
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
!(sts & DMA_GSTS_QIES), sts);
+out:
spin_unlock_irqrestore(&iommu->register_lock, flags);
}