about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorKeir Fraser <keir.fraser@citrix.com>2010-07-21 09:10:56 +0100
committerKeir Fraser <keir.fraser@citrix.com>2010-07-21 09:10:56 +0100
commit35d2934c08bcd0e4a5fb8325d018a5252e5d0216 (patch)
treea0960338cf8816dff7e236a5b8c0d832b6d59ba9
parentb02d5d2cd3f1b36565b0a233425653330b857619 (diff)
downloadxen-35d2934c08bcd0e4a5fb8325d018a5252e5d0216.tar.gz
xen-35d2934c08bcd0e4a5fb8325d018a5252e5d0216.tar.bz2
xen-35d2934c08bcd0e4a5fb8325d018a5252e5d0216.zip
vtd: Do not update-and-flush an IOMMU PTE if it does not change.
There are cases of identical repeated calls to iommu_map_page(). Flushing is slow, so it's worth detecting these and bailing early.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen-unstable changeset: 21813:07b718833c74
xen-unstable date: Fri Jul 16 17:41:53 2010 +0100
-rw-r--r--xen/drivers/passthrough/vtd/iommu.c20
1 files changed, 13 insertions, 7 deletions
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 53f81a6e40..025d27e160 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1584,9 +1584,8 @@ static int intel_iommu_map_page(
struct hvm_iommu *hd = domain_hvm_iommu(d);
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
- struct dma_pte *page = NULL, *pte = NULL;
+ struct dma_pte *page = NULL, *pte = NULL, old, new = { 0 };
u64 pg_maddr;
- int pte_present;
int flush_dev_iotlb;
int iommu_domid;
@@ -1604,15 +1603,22 @@ static int intel_iommu_map_page(
}
page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
pte = page + (gfn & LEVEL_MASK);
- pte_present = dma_pte_present(*pte);
- dma_set_pte_addr(*pte, (paddr_t)mfn << PAGE_SHIFT_4K);
- dma_set_pte_prot(*pte,
+ old = *pte;
+ dma_set_pte_addr(new, (paddr_t)mfn << PAGE_SHIFT_4K);
+ dma_set_pte_prot(new,
((flags & IOMMUF_readable) ? DMA_PTE_READ : 0) |
((flags & IOMMUF_writable) ? DMA_PTE_WRITE : 0));
/* Set the SNP on leaf page table if Snoop Control available */
if ( iommu_snoop )
- dma_set_pte_snp(*pte);
+ dma_set_pte_snp(new);
+
+ if ( old.val == new.val )
+ {
+ spin_unlock(&hd->mapping_lock);
+ return 0;
+ }
+ *pte = new;
iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
spin_unlock(&hd->mapping_lock);
@@ -1635,7 +1641,7 @@ static int intel_iommu_map_page(
continue;
if ( iommu_flush_iotlb_psi(iommu, iommu_domid,
(paddr_t)gfn << PAGE_SHIFT_4K, 1,
- !pte_present, flush_dev_iotlb) )
+ !dma_pte_present(old), flush_dev_iotlb) )
iommu_flush_write_buffer(iommu);
}