author     Wei Wang <wei.wang2@amd.com>    2011-01-27 16:10:52 +0000
committer  Wei Wang <wei.wang2@amd.com>    2011-01-27 16:10:52 +0000
commit     24c3ed18af8a89d0066f5668ad91a0c38c98f7a2 (patch)
tree       019aa625921eaabb519019de23f64391364cd24a
parent     3bbbb0c96c03814cb208f6bf9167c95a94f0bbcb (diff)
amd iommu: reduce io page level for hvm guest (1/3)
Since in most cases 2- or 3-level IO page tables are sufficient, this patch updates the page table level used for device assignment to reduce the overhead of DMA translation.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
-rw-r--r--  xen/drivers/passthrough/amd/iommu_map.c      8
-rw-r--r--  xen/drivers/passthrough/amd/pci_amd_iommu.c  9
2 files changed, 11 insertions(+), 6 deletions(-)
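For context, the level computation this series relies on can be modelled in a few lines of standalone C. This is a sketch, not the Xen source: paging_mode below is a hypothetical stand-in for get_paging_mode(), assuming the AMD IOMMU's 512-entry (9-bit) page tables.

#include <stdio.h>

/* Each IO page table level resolves 9 bits of the guest frame number,
 * so one table holds 512 entries. */
#define PTE_PER_TABLE_SHIFT 9
#define PTE_PER_TABLE_SIZE  (1UL << PTE_PER_TABLE_SHIFT)

/* Hypothetical stand-in for get_paging_mode(): add a level whenever a
 * single table cannot cover all remaining entries. */
static int paging_mode(unsigned long entries)
{
    int level = 1;

    while ( entries > PTE_PER_TABLE_SIZE )
    {
        entries >>= PTE_PER_TABLE_SHIFT;
        ++level;
    }
    return level;
}

int main(void)
{
    /* A 1GiB guest has 2^18 4KiB frames; a 4GiB guest has 2^20. */
    printf("1GiB guest -> %d levels\n", paging_mode(1UL << 18)); /* 2 */
    printf("4GiB guest -> %d levels\n", paging_mode(1UL << 20)); /* 3 */
    return 0;
}

This is why the commit message says 2- or 3-level tables cover most HVM guests: two levels already map 512 * 512 frames (1GiB), and each additional level multiplies the reach by 512.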
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 536da0da84..d973a123ea 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -411,10 +411,14 @@ static u64 iommu_l2e_from_pfn(struct page_info *table, int level,
     void *pde = NULL;
     void *table_vaddr;
     u64 next_table_maddr = 0;
+    unsigned int lowest = 1;

-    BUG_ON( table == NULL || level == 0 );
+    BUG_ON( table == NULL || level < lowest );

-    while ( level > 1 )
+    if ( level == lowest )
+        return page_to_maddr(table);
+
+    while ( level > lowest )
     {
         offset = io_pfn >> ((PTE_PER_TABLE_SHIFT *
                             (level - IOMMU_PAGING_MODE_LEVEL_1)));
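To see what the reworked walk in iommu_l2e_from_pfn() computes at each descent, here is a standalone sketch. show_offsets is hypothetical and assumes the same 9-bits-per-level layout as the loop above; with a 3-level table the walk needs only two descents before reaching the leaf level.

#include <stdio.h>

#define PTE_PER_TABLE_SHIFT 9

/* Hypothetical illustration: print the table offset consumed at each
 * level while walking down to level 1, mirroring the loop structure
 * in iommu_l2e_from_pfn(). */
static void show_offsets(unsigned long io_pfn, int level)
{
    for ( ; level > 1; --level )
    {
        unsigned int offset = (io_pfn >> (PTE_PER_TABLE_SHIFT * (level - 1)))
                              & ((1u << PTE_PER_TABLE_SHIFT) - 1);
        printf("level %d: offset %u\n", level, offset);
    }
}

int main(void)
{
    show_offsets(0xabcdeUL, 3);   /* guest frame 0xabcde, 3-level table */
    return 0;
}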
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 2442027fac..6a79efedb8 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -190,10 +190,7 @@ static int get_paging_mode(unsigned long entries)
 {
     int level = 1;

-    BUG_ON(!max_page);
-
-    if ( entries > max_page )
-        entries = max_page;
+    BUG_ON( !entries );

     while ( entries > PTE_PER_TABLE_SIZE )
     {
@@ -278,6 +275,7 @@ static int reassign_device( struct domain *source, struct domain *target,
     struct pci_dev *pdev;
     struct amd_iommu *iommu;
     int bdf;
+    struct hvm_iommu *t = domain_hvm_iommu(target);

     ASSERT(spin_is_locked(&pcidevs_lock));
     pdev = pci_get_pdev_by_domain(source, bus, devfn);
@@ -300,6 +298,9 @@ static int reassign_device( struct domain *source, struct domain *target,
     list_move(&pdev->domain_list, &target->arch.pdev_list);
     pdev->domain = target;

+    if ( target->max_pages > 0 )
+        t->paging_mode = get_paging_mode(target->max_pages);
+
     amd_iommu_setup_domain_device(target, iommu, bdf);
     AMD_IOMMU_DEBUG("Re-assign %02x:%02x.%x from domain %d to domain %d\n",
                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn),