aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKeir Fraser <keir.fraser@citrix.com>2010-06-04 10:38:21 +0100
committerKeir Fraser <keir.fraser@citrix.com>2010-06-04 10:38:21 +0100
commit78210cee6d119c6781d7eb935f561d479d367a47 (patch)
treee161fb00bca6ff77cf528eeae6681e0699b564ac
parent46245b38853b4bd6425be0c2de118433b73b5fa4 (diff)
downloadxen-78210cee6d119c6781d7eb935f561d479d367a47.tar.gz
xen-78210cee6d119c6781d7eb935f561d479d367a47.tar.bz2
xen-78210cee6d119c6781d7eb935f561d479d367a47.zip
iommu: Map correct permissions in IOMMU on grant read-only map request.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen-unstable changeset: 21476:69a8e9b6961f
xen-unstable date: Fri May 28 09:08:00 2010 +0100

iommu: Specify access permissions to iommu_map_page().

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen-unstable changeset: 21475:9ee5c292b112
xen-unstable date: Fri May 28 08:48:50 2010 +0100
-rw-r--r--xen/arch/ia64/xen/mm.c3
-rw-r--r--xen/arch/x86/mm.c3
-rw-r--r--xen/arch/x86/mm/hap/p2m-ept.c7
-rw-r--r--xen/arch/x86/mm/p2m.c9
-rw-r--r--xen/arch/x86/x86_64/mm.c2
-rw-r--r--xen/common/grant_table.c28
-rw-r--r--xen/drivers/passthrough/amd/iommu_map.c10
-rw-r--r--xen/drivers/passthrough/amd/pci_amd_iommu.c3
-rw-r--r--xen/drivers/passthrough/iommu.c8
-rw-r--r--xen/drivers/passthrough/vtd/ia64/vtd.c3
-rw-r--r--xen/drivers/passthrough/vtd/iommu.c10
-rw-r--r--xen/drivers/passthrough/vtd/x86/vtd.c3
-rw-r--r--xen/include/asm-x86/hvm/svm/amd-iommu-defs.h3
-rw-r--r--xen/include/asm-x86/hvm/svm/amd-iommu-proto.h3
-rw-r--r--xen/include/xen/iommu.h13
15 files changed, 72 insertions, 36 deletions
diff --git a/xen/arch/ia64/xen/mm.c b/xen/arch/ia64/xen/mm.c
index bcaadba8e5..c8ad97673c 100644
--- a/xen/arch/ia64/xen/mm.c
+++ b/xen/arch/ia64/xen/mm.c
@@ -2897,7 +2897,8 @@ __guest_physmap_add_page(struct domain *d, unsigned long gpfn,
int i, j;
j = 1 << (PAGE_SHIFT-PAGE_SHIFT_4K);
for(i = 0 ; i < j; i++)
- iommu_map_page(d, gpfn*j + i, mfn*j + i);
+ iommu_map_page(d, gpfn*j + i, mfn*j + i,
+ IOMMUF_readable|IOMMUF_writable);
}
}
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 8002c3d67f..c91790ebbb 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2399,7 +2399,8 @@ static int __get_page_type(struct page_info *page, unsigned long type,
iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
else if ( type == PGT_writable_page )
iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
- page_to_mfn(page));
+ page_to_mfn(page),
+ IOMMUF_readable|IOMMUF_writable);
}
}
diff --git a/xen/arch/x86/mm/hap/p2m-ept.c b/xen/arch/x86/mm/hap/p2m-ept.c
index 264a101f12..445d1380e2 100644
--- a/xen/arch/x86/mm/hap/p2m-ept.c
+++ b/xen/arch/x86/mm/hap/p2m-ept.c
@@ -350,10 +350,13 @@ out:
if ( order == EPT_TABLE_ORDER )
{
for ( i = 0; i < (1 << order); i++ )
- iommu_map_page(d, gfn - offset + i, mfn_x(mfn) - offset + i);
+ iommu_map_page(
+ d, gfn - offset + i, mfn_x(mfn) - offset + i,
+ IOMMUF_readable|IOMMUF_writable);
}
else if ( !order )
- iommu_map_page(d, gfn, mfn_x(mfn));
+ iommu_map_page(
+ d, gfn, mfn_x(mfn), IOMMUF_readable|IOMMUF_writable);
}
else
{
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 9e186ed1ff..768b5b8f3e 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1290,7 +1290,8 @@ p2m_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
{
if ( p2mt == p2m_ram_rw )
for ( i = 0; i < (1UL << page_order); i++ )
- iommu_map_page(d, gfn+i, mfn_x(mfn)+i );
+ iommu_map_page(d, gfn+i, mfn_x(mfn)+i,
+ IOMMUF_readable|IOMMUF_writable);
else
for ( int i = 0; i < (1UL << page_order); i++ )
iommu_unmap_page(d, gfn+i);
@@ -2108,12 +2109,16 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
if ( need_iommu(d) && t == p2m_ram_rw )
{
for ( i = 0; i < (1 << page_order); i++ )
- if ( (rc = iommu_map_page(d, mfn + i, mfn + i)) != 0 )
+ {
+ rc = iommu_map_page(
+ d, mfn + i, mfn + i, IOMMUF_readable|IOMMUF_writable);
+ if ( rc != 0 )
{
while ( i-- > 0 )
iommu_unmap_page(d, mfn + i);
return rc;
}
+ }
}
return 0;
}
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 71d0a11ea5..41632d56d7 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1461,7 +1461,7 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
goto destroy_m2p;
for ( i = spfn; i < epfn; i++ )
- if ( iommu_map_page(dom0, i, i) )
+ if ( iommu_map_page(dom0, i, i, IOMMUF_readable|IOMMUF_writable) )
break;
if ( i != epfn )
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 98a79637d6..cf20a08809 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -596,16 +596,20 @@ __gnttab_map_grant_ref(
goto undo_out;
}
- if ( (!is_hvm_domain(ld) && need_iommu(ld)) &&
- !(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
- (act_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
+ if ( !is_hvm_domain(ld) && need_iommu(ld) )
{
- /* Shouldn't happen, because you can't use iommu in a HVM
- * domain. */
+ int err = 0;
+ /* Shouldn't happen, because you can't use iommu in a HVM domain. */
BUG_ON(paging_mode_translate(ld));
/* We're not translated, so we know that gmfns and mfns are
the same things, so the IOMMU entry is always 1-to-1. */
- if ( iommu_map_page(ld, frame, frame) )
+ if ( (act_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
+ !(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
+ err = iommu_map_page(ld, frame, frame,
+ IOMMUF_readable|IOMMUF_writable);
+ else if ( act_pin && !old_pin )
+ err = iommu_map_page(ld, frame, frame, IOMMUF_readable);
+ if ( err )
{
rc = GNTST_general_error;
goto undo_out;
@@ -779,12 +783,16 @@ __gnttab_unmap_common(
act->pin -= GNTPIN_hstw_inc;
}
- if ( (!is_hvm_domain(ld) && need_iommu(ld)) &&
- (old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
- !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
+ if ( !is_hvm_domain(ld) && need_iommu(ld) )
{
+ int err = 0;
BUG_ON(paging_mode_translate(ld));
- if ( iommu_unmap_page(ld, op->frame) )
+ if ( old_pin && !act->pin )
+ err = iommu_unmap_page(ld, op->frame);
+ else if ( (old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
+ !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
+ err = iommu_map_page(ld, op->frame, op->frame, IOMMUF_readable);
+ if ( err )
{
rc = GNTST_general_error;
goto unmap_out;
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 6bd64f9556..178e7672c8 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -450,12 +450,11 @@ static u64 iommu_l2e_from_pfn(struct page_info *table, int level,
return next_table_maddr;
}
-int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
+int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
+ unsigned int flags)
{
u64 iommu_l2e;
struct hvm_iommu *hd = domain_hvm_iommu(d);
- int iw = IOMMU_IO_WRITE_ENABLED;
- int ir = IOMMU_IO_READ_ENABLED;
BUG_ON( !hd->root_table );
@@ -469,7 +468,10 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
domain_crash(d);
return -EFAULT;
}
- set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
+
+ set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT,
+ !!(flags & IOMMUF_writable),
+ !!(flags & IOMMUF_readable));
spin_unlock(&hd->mapping_lock);
return 0;
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 6db10faf68..33797acf01 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -235,7 +235,8 @@ static int amd_iommu_domain_init(struct domain *domain)
{
/* setup 1:1 page table for dom0 */
for ( i = 0; i < max_page; i++ )
- amd_iommu_map_page(domain, i, i);
+ amd_iommu_map_page(domain, i, i,
+ IOMMUF_readable|IOMMUF_writable);
}
amd_iommu_setup_dom0_devices(domain);
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index f83e0746ef..42b379cb6b 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -172,7 +172,8 @@ static int iommu_populate_page_table(struct domain *d)
{
BUG_ON(SHARED_M2P(mfn_to_gmfn(d, page_to_mfn(page))));
rc = hd->platform_ops->map_page(
- d, mfn_to_gmfn(d, page_to_mfn(page)), page_to_mfn(page));
+ d, mfn_to_gmfn(d, page_to_mfn(page)), page_to_mfn(page),
+ IOMMUF_readable|IOMMUF_writable);
if (rc)
{
spin_unlock(&d->page_alloc_lock);
@@ -217,14 +218,15 @@ void iommu_domain_destroy(struct domain *d)
}
}
-int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
+int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
+ unsigned int flags)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- return hd->platform_ops->map_page(d, gfn, mfn);
+ return hd->platform_ops->map_page(d, gfn, mfn, flags);
}
int iommu_unmap_page(struct domain *d, unsigned long gfn)
diff --git a/xen/drivers/passthrough/vtd/ia64/vtd.c b/xen/drivers/passthrough/vtd/ia64/vtd.c
index 660bcd3512..4364c55648 100644
--- a/xen/drivers/passthrough/vtd/ia64/vtd.c
+++ b/xen/drivers/passthrough/vtd/ia64/vtd.c
@@ -108,7 +108,8 @@ static int do_dom0_iommu_mapping(unsigned long start, unsigned long end,
pfn = page_addr >> PAGE_SHIFT;
tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
for ( j = 0; j < tmp; j++ )
- iommu_map_page(d, (pfn*tmp+j), (pfn*tmp+j));
+ iommu_map_page(d, (pfn*tmp+j), (pfn*tmp+j),
+ IOMMUF_readable|IOMMUF_writable);
page_addr += PAGE_SIZE;
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 1c47d5c6c8..67862caab7 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1571,7 +1571,8 @@ void iommu_domain_teardown(struct domain *d)
}
static int intel_iommu_map_page(
- struct domain *d, unsigned long gfn, unsigned long mfn)
+ struct domain *d, unsigned long gfn, unsigned long mfn,
+ unsigned int flags)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
struct acpi_drhd_unit *drhd;
@@ -1598,7 +1599,9 @@ static int intel_iommu_map_page(
pte = page + (gfn & LEVEL_MASK);
pte_present = dma_pte_present(*pte);
dma_set_pte_addr(*pte, (paddr_t)mfn << PAGE_SHIFT_4K);
- dma_set_pte_prot(*pte, DMA_PTE_READ | DMA_PTE_WRITE);
+ dma_set_pte_prot(*pte,
+ ((flags & IOMMUF_readable) ? DMA_PTE_READ : 0) |
+ ((flags & IOMMUF_writable) ? DMA_PTE_WRITE : 0));
/* Set the SNP on leaf page table if Snoop Control available */
if ( iommu_snoop )
@@ -1680,7 +1683,8 @@ static int rmrr_identity_mapping(struct domain *d,
while ( base_pfn < end_pfn )
{
- if ( intel_iommu_map_page(d, base_pfn, base_pfn) )
+ if ( intel_iommu_map_page(d, base_pfn, base_pfn,
+ IOMMUF_readable|IOMMUF_writable) )
return -1;
base_pfn++;
}
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index 580fc03eda..bdd980c264 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -153,7 +153,8 @@ void iommu_set_dom0_mapping(struct domain *d)
tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
for ( j = 0; j < tmp; j++ )
- iommu_map_page(d, (i*tmp+j), (i*tmp+j));
+ iommu_map_page(d, (i*tmp+j), (i*tmp+j),
+ IOMMUF_readable|IOMMUF_writable);
if (!(i & (0xfffff >> (PAGE_SHIFT - PAGE_SHIFT_4K))))
process_pending_softirqs();
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
index 463121bf77..56aeb8a912 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
@@ -388,9 +388,6 @@
#define MAX_AMD_IOMMUS 32
#define IOMMU_PAGE_TABLE_LEVEL_3 3
#define IOMMU_PAGE_TABLE_LEVEL_4 4
-#define IOMMU_IO_WRITE_ENABLED 1
-#define IOMMU_IO_READ_ENABLED 1
-#define HACK_BIOS_SETTINGS 0
/* interrupt remapping table */
#define INT_REMAP_INDEX_DM_MASK 0x1C00
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index 26d859659e..594bb21e22 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -52,7 +52,8 @@ int __init amd_iommu_init(void);
int __init amd_iommu_update_ivrs_mapping_acpi(void);
/* mapping functions */
-int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
+int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
+ unsigned int flags);
int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
u64 amd_iommu_get_next_table_from_pte(u32 *entry);
int amd_iommu_reserve_domain_unity_map(struct domain *domain,
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 3a418bc5ba..5181ffc5be 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -68,8 +68,16 @@ int assign_device(struct domain *d, u8 bus, u8 devfn);
int deassign_device(struct domain *d, u8 bus, u8 devfn);
int iommu_get_device_group(struct domain *d, u8 bus, u8 devfn,
XEN_GUEST_HANDLE_64(uint32) buf, int max_sdevs);
-int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
+
+/* iommu_map_page() takes flags to direct the mapping operation. */
+#define _IOMMUF_readable 0
+#define IOMMUF_readable (1u<<_IOMMUF_readable)
+#define _IOMMUF_writable 1
+#define IOMMUF_writable (1u<<_IOMMUF_writable)
+int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
+ unsigned int flags);
int iommu_unmap_page(struct domain *d, unsigned long gfn);
+
void iommu_domain_teardown(struct domain *d);
int hvm_do_IRQ_dpci(struct domain *d, unsigned int irq);
int dpci_ioport_intercept(ioreq_t *p);
@@ -102,7 +110,8 @@ struct iommu_ops {
int (*remove_device)(struct pci_dev *pdev);
int (*assign_device)(struct domain *d, u8 bus, u8 devfn);
void (*teardown)(struct domain *d);
- int (*map_page)(struct domain *d, unsigned long gfn, unsigned long mfn);
+ int (*map_page)(struct domain *d, unsigned long gfn, unsigned long mfn,
+ unsigned int flags);
int (*unmap_page)(struct domain *d, unsigned long gfn);
int (*reassign_device)(struct domain *s, struct domain *t,
u8 bus, u8 devfn);