aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKeir Fraser <keir@xen.org>2011-01-10 08:40:32 +0000
committerKeir Fraser <keir@xen.org>2011-01-10 08:40:32 +0000
commit6cd8743754e43125aba681f11b94e2d8bbb413e0 (patch)
treeb63a74a6f152b92b11d8e24845e9497aaf343190
parent4306e1ed46b7d0b3b44e69983f8701ce7785b669 (diff)
downloadxen-6cd8743754e43125aba681f11b94e2d8bbb413e0.tar.gz
xen-6cd8743754e43125aba681f11b94e2d8bbb413e0.tar.bz2
xen-6cd8743754e43125aba681f11b94e2d8bbb413e0.zip
EPT/VT-d: bug fix for EPT/VT-d table sharing
This patch makes the following changes: 1) Moves EPT/VT-d sharing initialization back to when it is actually needed, to make sure vmx_ept_vpid_cap has been initialized. 2) Adds a page order parameter to iommu_pte_flush() to tell VT-d what size of page to flush. 3) Adds a hap_2mb flag to ease performance studies between the base 4KB EPT page size and configurations where 2MB and 1GB page size support are enabled. Signed-off-by: Allen Kay <allen.m.kay@intel.com>
-rw-r--r--xen/arch/x86/mm/hap/p2m-ept.c6
-rw-r--r--xen/arch/x86/mm/p2m.c5
-rw-r--r--xen/drivers/passthrough/vtd/iommu.c90
-rw-r--r--xen/include/xen/iommu.h2
4 files changed, 45 insertions, 58 deletions
diff --git a/xen/arch/x86/mm/hap/p2m-ept.c b/xen/arch/x86/mm/hap/p2m-ept.c
index 47a9f5e8c2..7eebb3253e 100644
--- a/xen/arch/x86/mm/hap/p2m-ept.c
+++ b/xen/arch/x86/mm/hap/p2m-ept.c
@@ -451,12 +451,12 @@ out:
if ( rv && iommu_enabled && need_iommu(p2m->domain) && need_modify_vtd_table )
{
if ( iommu_hap_pt_share )
- iommu_pte_flush(d, gfn, (u64*)ept_entry, vtd_pte_present);
+ iommu_pte_flush(d, gfn, (u64*)ept_entry, order, vtd_pte_present);
else
{
if ( p2mt == p2m_ram_rw )
{
- if ( order == EPT_TABLE_ORDER )
+ if ( order > 0 )
{
for ( i = 0; i < (1 << order); i++ )
iommu_map_page(
@@ -469,7 +469,7 @@ out:
}
else
{
- if ( order == EPT_TABLE_ORDER )
+ if ( order > 0 )
{
for ( i = 0; i < (1 << order); i++ )
iommu_unmap_page(p2m->domain, gfn - offset + i);
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index bfe2842817..c77be9497f 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -43,6 +43,9 @@
static bool_t __read_mostly opt_hap_1gb = 1;
boolean_param("hap_1gb", opt_hap_1gb);
+static bool_t __read_mostly opt_hap_2mb = 1;
+boolean_param("hap_2mb", opt_hap_2mb);
+
/* Printouts */
#define P2M_PRINTK(_f, _a...) \
debugtrace_printk("p2m: %s(): " _f, __func__, ##_a)
@@ -1779,7 +1782,7 @@ int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
order = ( (((gfn | mfn_x(mfn) | todo) & ((1ul << 18) - 1)) == 0) &&
hvm_hap_has_1gb(d) && opt_hap_1gb ) ? 18 :
((((gfn | mfn_x(mfn) | todo) & ((1ul << 9) - 1)) == 0) &&
- hvm_hap_has_2mb(d)) ? 9 : 0;
+ hvm_hap_has_2mb(d) && opt_hap_2mb) ? 9 : 0;
else
order = 0;
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index dd7c7f666b..1d01f8cbc4 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -518,24 +518,9 @@ static int inline iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
return status;
}
-static int inline get_alignment(u64 base, unsigned int size)
-{
- int t = 0;
- u64 end;
-
- end = base + size - 1;
- while ( base != end )
- {
- t++;
- base >>= 1;
- end >>= 1;
- }
- return t;
-}
-
static int inline iommu_flush_iotlb_psi(
struct iommu *iommu, u16 did, u64 addr, unsigned int pages,
- int flush_non_present_entry, int flush_dev_iotlb)
+ int order, int flush_non_present_entry, int flush_dev_iotlb)
{
unsigned int align;
struct iommu_flush *flush = iommu_get_flush(iommu);
@@ -548,17 +533,12 @@ static int inline iommu_flush_iotlb_psi(
if ( !cap_pgsel_inv(iommu->cap) )
return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry, flush_dev_iotlb);
- /*
- * PSI requires page size is 2 ^ x, and the base address is naturally
- * aligned to the size
- */
- align = get_alignment(addr >> PAGE_SHIFT_4K, pages);
/* Fallback to domain selective flush if size is too big */
- if ( align > cap_max_amask_val(iommu->cap) )
+ if ( order > cap_max_amask_val(iommu->cap) )
return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry, flush_dev_iotlb);
- addr >>= PAGE_SHIFT_4K + align;
- addr <<= PAGE_SHIFT_4K + align;
+ addr >>= PAGE_SHIFT_4K + order;
+ addr <<= PAGE_SHIFT_4K + order;
/* apply platform specific errata workarounds */
vtd_ops_preamble_quirk(iommu);
@@ -634,8 +614,8 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
iommu_domid= domain_iommu_domid(domain, iommu);
if ( iommu_domid == -1 )
continue;
- if ( iommu_flush_iotlb_psi(iommu, iommu_domid,
- addr, 1, 0, flush_dev_iotlb) )
+ if ( iommu_flush_iotlb_psi(iommu, iommu_domid, addr,
+ 1, 0, 0, flush_dev_iotlb) )
iommu_flush_write_buffer(iommu);
}
}
@@ -1710,7 +1690,7 @@ static int intel_iommu_map_page(
if ( iommu_domid == -1 )
continue;
if ( iommu_flush_iotlb_psi(iommu, iommu_domid,
- (paddr_t)gfn << PAGE_SHIFT_4K, 1,
+ (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0,
!dma_pte_present(old), flush_dev_iotlb) )
iommu_flush_write_buffer(iommu);
}
@@ -1729,7 +1709,8 @@ static int intel_iommu_unmap_page(struct domain *d, unsigned long gfn)
return 0;
}
-void iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte, int present)
+void iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
+ int order, int present)
{
struct acpi_drhd_unit *drhd;
struct iommu *iommu = NULL;
@@ -1751,7 +1732,7 @@ void iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte, int present)
continue;
if ( iommu_flush_iotlb_psi(iommu, iommu_domid,
(paddr_t)gfn << PAGE_SHIFT_4K, 1,
- !present, flush_dev_iotlb) )
+ order, !present, flush_dev_iotlb) )
iommu_flush_write_buffer(iommu);
}
}
@@ -1769,6 +1750,28 @@ static int vtd_ept_page_compatible(struct iommu *iommu)
return 1;
}
+static bool_t vtd_ept_share(void)
+{
+ struct acpi_drhd_unit *drhd;
+ struct iommu *iommu;
+ bool_t share = TRUE;
+
+ /* sharept defaults to 0 for now, default to 1 when feature matures */
+ if ( !sharept )
+ share = FALSE;
+
+ /*
+ * Determine whether EPT and VT-d page tables can be shared or not.
+ */
+ for_each_drhd_unit ( drhd )
+ {
+ iommu = drhd->iommu;
+ if ( !vtd_ept_page_compatible(drhd->iommu) )
+ share = FALSE;
+ }
+ return share;
+}
+
/*
* set VT-d page table directory to EPT table if allowed
*/
@@ -1779,11 +1782,13 @@ void iommu_set_pgd(struct domain *d)
ASSERT( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled );
- if ( !iommu_hap_pt_share )
- return;
-
+ iommu_hap_pt_share = vtd_ept_share();
pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
hd->pgd_maddr = pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
+
+ dprintk(XENLOG_INFO VTDPREFIX,
+ "VT-d page table %s with EPT table\n",
+ iommu_hap_pt_share ? "shares" : "not sharing");
}
static int domain_rmrr_mapped(struct domain *d,
@@ -2036,27 +2041,6 @@ static int init_vtd_hw(void)
}
}
iommu_flush_all();
-
- /*
- * Determine whether EPT and VT-d page tables can be shared or not.
- */
- iommu_hap_pt_share = TRUE;
- for_each_drhd_unit ( drhd )
- {
- iommu = drhd->iommu;
- if ( (drhd->iommu->nr_pt_levels != VTD_PAGE_TABLE_LEVEL_4) ||
- !vtd_ept_page_compatible(drhd->iommu) )
- iommu_hap_pt_share = FALSE;
- }
-
- /* keep boot flag sharept as safe fallback. remove after feature matures */
- if ( !sharept )
- iommu_hap_pt_share = FALSE;
-
- dprintk(XENLOG_INFO VTDPREFIX,
- "VT-d page table %sshared with EPT table\n",
- iommu_hap_pt_share ? "" : "not ");
-
return 0;
}
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 934ffab059..4b93c31e38 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -85,7 +85,7 @@ int iommu_get_device_group(struct domain *d, u8 bus, u8 devfn,
int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
unsigned int flags);
int iommu_unmap_page(struct domain *d, unsigned long gfn);
-void iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte, int present);
+void iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte, int order, int present);
void iommu_set_pgd(struct domain *d);
void iommu_domain_teardown(struct domain *d);
int hvm_do_IRQ_dpci(struct domain *d, unsigned int irq);