author     Keir Fraser <keir.fraser@citrix.com>    2010-07-21 09:10:27 +0100
committer  Keir Fraser <keir.fraser@citrix.com>    2010-07-21 09:10:27 +0100
commit     b02d5d2cd3f1b36565b0a233425653330b857619 (patch)
tree       2bdf92746a47be58546dc212dc03499c799ac497
parent     707a7f9424084fc98b94d76dd16b4cd63b1fe41c (diff)
iommu: New options iommu=dom0-strict and iommu=dom0-passthrough

The former strips dom0 of its usual 1:1 mapping of all memory, and only
provides it with mappings of its own memory, like any other domain. The
latter is a new, consistent name for iommu=passthrough.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen-unstable changeset: 21771:42ccccfe1a6a
xen-unstable date:      Fri Jul 09 16:45:42 2010 +0100

iommu: Map dom0 initial allocation in 'dom0-strict' iommu mode.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen-unstable changeset: 21812:e382656e4dcc
xen-unstable date:      Fri Jul 16 16:19:51 2010 +0100
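[Editorial illustration, not part of the patch] Both new modes are selected through the existing "iommu=" Xen boot parameter handled by parse_iommu_param() in the hunks below, which takes a comma-separated list of values. A hypothetical bootloader entry — exact syntax depends on the bootloader in use — might pass one of:

    kernel /boot/xen.gz iommu=dom0-strict
    kernel /boot/xen.gz iommu=dom0-passthrough

If both values are given, iommu_setup() below clears iommu_passthrough, so dom0-strict takes precedence.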
-rw-r--r--  xen/arch/ia64/xen/domain.c                  |  2
-rw-r--r--  xen/arch/x86/domain_build.c                 |  2
-rw-r--r--  xen/arch/x86/x86_64/mm.c                    | 22
-rw-r--r--  xen/drivers/passthrough/amd/pci_amd_iommu.c | 34
-rw-r--r--  xen/drivers/passthrough/iommu.c             | 57
-rw-r--r--  xen/drivers/passthrough/vtd/iommu.c         | 31
-rw-r--r--  xen/include/xen/iommu.h                     |  2
7 files changed, 103 insertions(+), 47 deletions(-)
diff --git a/xen/arch/ia64/xen/domain.c b/xen/arch/ia64/xen/domain.c
index 854c8b2dee..d4f763a69a 100644
--- a/xen/arch/ia64/xen/domain.c
+++ b/xen/arch/ia64/xen/domain.c
@@ -2296,6 +2296,8 @@ int __init construct_dom0(struct domain *d,
physdev_init_dom0(d);
+ iommu_dom0_init(d);
+
return 0;
}
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index 6e7acaf8ee..f9d1513f78 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -1134,6 +1134,8 @@ int __init construct_dom0(
BUG_ON(rc != 0);
+ iommu_dom0_init(dom0);
+
return 0;
}
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 41632d56d7..47b2f8bc98 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1460,12 +1460,18 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
if ( ret )
goto destroy_m2p;
- for ( i = spfn; i < epfn; i++ )
- if ( iommu_map_page(dom0, i, i, IOMMUF_readable|IOMMUF_writable) )
- break;
-
- if ( i != epfn )
- goto destroy_iommu;
+ if ( !need_iommu(dom0) )
+ {
+ for ( i = spfn; i < epfn; i++ )
+ if ( iommu_map_page(dom0, i, i, IOMMUF_readable|IOMMUF_writable) )
+ break;
+ if ( i != epfn )
+ {
+ while (i-- > old_max)
+ iommu_unmap_page(dom0, i);
+ goto destroy_m2p;
+ }
+ }
/* We can't revert any more */
transfer_pages_to_heap(&info);
@@ -1474,10 +1480,6 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
return 0;
-destroy_iommu:
- while (i-- > old_max)
- iommu_unmap_page(dom0, i);
-
destroy_m2p:
destroy_m2p_mapping(&info);
max_page = old_max;
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 33797acf01..d4e06ffc86 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -212,9 +212,9 @@ static int get_paging_mode(unsigned long entries)
return level;
}
-static int amd_iommu_domain_init(struct domain *domain)
+static int amd_iommu_domain_init(struct domain *d)
{
- struct hvm_iommu *hd = domain_hvm_iommu(domain);
+ struct hvm_iommu *hd = domain_hvm_iommu(d);
/* allocate page directroy */
if ( allocate_domain_resources(hd) != 0 )
@@ -224,27 +224,26 @@ static int amd_iommu_domain_init(struct domain *domain)
return -ENOMEM;
}
- hd->paging_mode = is_hvm_domain(domain)?
+ hd->paging_mode = is_hvm_domain(d) ?
IOMMU_PAGE_TABLE_LEVEL_4 : get_paging_mode(max_page);
- if ( domain->domain_id == 0 )
- {
- unsigned long i;
+ hd->domain_id = d->domain_id;
- if ( !iommu_passthrough )
- {
- /* setup 1:1 page table for dom0 */
- for ( i = 0; i < max_page; i++ )
- amd_iommu_map_page(domain, i, i,
- IOMMUF_readable|IOMMUF_writable);
- }
+ return 0;
+}
- amd_iommu_setup_dom0_devices(domain);
- }
+static void amd_iommu_dom0_init(struct domain *d)
+{
+ unsigned long i;
- hd->domain_id = domain->domain_id;
+ if ( !iommu_passthrough && !need_iommu(d) )
+ {
+ /* Set up 1:1 page table for dom0 */
+ for ( i = 0; i < max_page; i++ )
+ amd_iommu_map_page(d, i, i, IOMMUF_readable|IOMMUF_writable);
+ }
- return 0;
+ amd_iommu_setup_dom0_devices(d);
}
static void amd_iommu_disable_domain_device(
@@ -433,6 +432,7 @@ static int amd_iommu_group_id(u8 bus, u8 devfn)
const struct iommu_ops amd_iommu_ops = {
.init = amd_iommu_domain_init,
+ .dom0_init = amd_iommu_dom0_init,
.add_device = amd_iommu_add_device,
.remove_device = amd_iommu_remove_device,
.assign_device = amd_iommu_assign_device,
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 4fa56cf920..62c801c1c2 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -18,6 +18,7 @@
#include <asm/hvm/iommu.h>
#include <xen/paging.h>
#include <xen/guest_access.h>
+#include <xen/softirq.h>
static void parse_iommu_param(char *s);
static int iommu_populate_page_table(struct domain *d);
@@ -30,8 +31,8 @@ static int iommu_populate_page_table(struct domain *d);
* force|required Don't boot unless IOMMU is enabled
* workaround_bios_bug Workaround some bios issue to still enable
VT-d, don't guarantee security
- * passthrough Enable VT-d DMA passthrough (no DMA
- * translation for Dom0)
+ * dom0-passthrough No DMA translation at all for Dom0
+ * dom0-strict No 1:1 memory mapping for Dom0
* no-snoop Disable VT-d Snoop Control
* no-qinval Disable VT-d Queued Invalidation
* no-intremap Disable VT-d Interrupt Remapping
@@ -39,6 +40,7 @@ static int iommu_populate_page_table(struct domain *d);
custom_param("iommu", parse_iommu_param);
bool_t __read_mostly iommu_enabled = 1;
bool_t __read_mostly force_iommu;
+bool_t __read_mostly iommu_dom0_strict;
bool_t __read_mostly iommu_verbose;
bool_t __read_mostly iommu_workaround_bios_bug;
bool_t __read_mostly iommu_passthrough;
@@ -64,8 +66,6 @@ static void __init parse_iommu_param(char *s)
force_iommu = 1;
else if ( !strcmp(s, "workaround_bios_bug") )
iommu_workaround_bios_bug = 1;
- else if ( !strcmp(s, "passthrough") )
- iommu_passthrough = 1;
else if ( !strcmp(s, "verbose") )
iommu_verbose = 1;
else if ( !strcmp(s, "no-snoop") )
@@ -78,14 +78,18 @@ static void __init parse_iommu_param(char *s)
amd_iommu_debug = 1;
else if ( !strcmp(s, "amd-iommu-perdev-intremap") )
amd_iommu_perdev_intremap = 1;
+ else if ( !strcmp(s, "dom0-passthrough") )
+ iommu_passthrough = 1;
+ else if ( !strcmp(s, "dom0-strict") )
+ iommu_dom0_strict = 1;
s = ss + 1;
} while ( ss );
}
-int iommu_domain_init(struct domain *domain)
+int iommu_domain_init(struct domain *d)
{
- struct hvm_iommu *hd = domain_hvm_iommu(domain);
+ struct hvm_iommu *hd = domain_hvm_iommu(d);
spin_lock_init(&hd->mapping_lock);
INIT_LIST_HEAD(&hd->g2m_ioport_list);
@@ -95,7 +99,36 @@ int iommu_domain_init(struct domain *domain)
return 0;
hd->platform_ops = iommu_get_ops();
- return hd->platform_ops->init(domain);
+ return hd->platform_ops->init(d);
+}
+
+void iommu_dom0_init(struct domain *d)
+{
+ struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+ if ( !iommu_enabled )
+ return;
+
+ d->need_iommu = !!iommu_dom0_strict;
+ if ( need_iommu(d) )
+ {
+ struct page_info *page;
+ unsigned int i = 0;
+ page_list_for_each ( page, &d->page_list )
+ {
+ unsigned long mfn = page_to_mfn(page);
+ unsigned int mapping = IOMMUF_readable;
+ if ( ((page->u.inuse.type_info & PGT_count_mask) == 0) ||
+ ((page->u.inuse.type_info & PGT_type_mask)
+ == PGT_writable_page) )
+ mapping |= IOMMUF_writable;
+ hd->platform_ops->map_page(d, mfn, mfn, mapping);
+ if ( !(i++ & 0xfffff) )
+ process_pending_softirqs();
+ }
+ }
+
+ return hd->platform_ops->dom0_init(d);
}
int iommu_add_device(struct pci_dev *pdev)
@@ -276,6 +309,9 @@ int iommu_setup(void)
{
int rc = -ENODEV;
+ if ( iommu_dom0_strict )
+ iommu_passthrough = 0;
+
if ( iommu_enabled )
{
rc = iommu_hardware_setup();
@@ -290,8 +326,15 @@ int iommu_setup(void)
iommu_snoop = 0;
iommu_qinval = 0;
iommu_intremap = 0;
+ iommu_passthrough = 0;
+ iommu_dom0_strict = 0;
}
printk("I/O virtualisation %sabled\n", iommu_enabled ? "en" : "dis");
+ if ( iommu_enabled )
+ printk(" - Dom0 mode: %s\n",
+ iommu_passthrough ? "Passthrough" :
+ iommu_dom0_strict ? "Strict" : "Relaxed");
+
return rc;
}
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index f2f0d8ce05..53f81a6e40 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1170,29 +1170,33 @@ void __init iommu_free(struct acpi_drhd_unit *drhd)
static int intel_iommu_domain_init(struct domain *d)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
- struct iommu *iommu;
- struct acpi_drhd_unit *drhd;
hd->agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
- if ( d->domain_id == 0 )
+ return 0;
+}
+
+static void intel_iommu_dom0_init(struct domain *d)
+{
+ struct iommu *iommu;
+ struct acpi_drhd_unit *drhd;
+
+ if ( !iommu_passthrough && !need_iommu(d) )
{
/* Set up 1:1 page table for dom0 */
iommu_set_dom0_mapping(d);
+ }
- setup_dom0_devices(d);
- setup_dom0_rmrr(d);
+ setup_dom0_devices(d);
+ setup_dom0_rmrr(d);
- iommu_flush_all();
+ iommu_flush_all();
- for_each_drhd_unit ( drhd )
- {
- iommu = drhd->iommu;
- iommu_enable_translation(iommu);
- }
+ for_each_drhd_unit ( drhd )
+ {
+ iommu = drhd->iommu;
+ iommu_enable_translation(iommu);
}
-
- return 0;
}
static int domain_context_mapping_one(
@@ -2155,6 +2159,7 @@ static void vtd_resume(void)
const struct iommu_ops intel_iommu_ops = {
.init = intel_iommu_domain_init,
+ .dom0_init = intel_iommu_dom0_init,
.add_device = intel_iommu_add_device,
.remove_device = intel_iommu_remove_device,
.assign_device = intel_iommu_assign_device,
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 065607a6cf..f5aa87668f 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -64,6 +64,7 @@ int intremap_enabled(void);
int iommu_add_device(struct pci_dev *pdev);
int iommu_remove_device(struct pci_dev *pdev);
int iommu_domain_init(struct domain *d);
+void iommu_dom0_init(struct domain *d);
void iommu_domain_destroy(struct domain *d);
int device_assigned(u8 bus, u8 devfn);
int assign_device(struct domain *d, u8 bus, u8 devfn);
@@ -108,6 +109,7 @@ void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci);
struct iommu_ops {
int (*init)(struct domain *d);
+ void (*dom0_init)(struct domain *d);
int (*add_device)(struct pci_dev *pdev);
int (*remove_device)(struct pci_dev *pdev);
int (*assign_device)(struct domain *d, u8 bus, u8 devfn);
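[Editorial illustration, not part of the patch] With the printk calls added to iommu_setup() in xen/drivers/passthrough/iommu.c above, the hypervisor now reports the chosen dom0 IOMMU mode at boot. On a machine with a working IOMMU, booting with iommu=dom0-strict would be expected to produce console output along the lines of (illustrative, not captured from a real boot):

    I/O virtualisation enabled
     - Dom0 mode: Strict

with "Passthrough" printed for iommu=dom0-passthrough and "Relaxed" for the default 1:1-mapped behaviour.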