author     Keir Fraser <keir.fraser@citrix.com>    2007-12-05 10:53:47 +0000
committer  Keir Fraser <keir.fraser@citrix.com>    2007-12-05 10:53:47 +0000
commit     614f395bfd671d3a70479792121af87ad8bf3b07 (patch)
tree       2b632eefca4b1e5b6bacadfe11a4b05b7cc90902
parent     2b795c6e3709bd1049fe7ecdd4b6e95ca8ab3dc7 (diff)
vt-d: Use bitmap to solve domain-id limitation issue.
The Capability register reports the domain-id width supported by hardware. For implementations supporting fewer than 16-bit domain-ids, the unused bits of the domain identifier field (87:72) in the context entry are treated as reserved by hardware. For example, for an implementation supporting 4-bit domain-ids, bits 87:76 of this field are treated as reserved. Sixteen is a small number, so overflow can easily happen. Moreover, context entries programmed with the same domain identifier must always reference the same address translation structure (through the ASR field), so Dom16 would conflict with Dom0 and device assignment would fail.

This patch implements a domain-id bitmap to solve the above issue.

Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
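The heart of the change is a small allocator: rather than deriving the IOMMU domain id from Xen's own domain id (which the old "domain_id + 1" macro truncated to the hardware's domain-id width), ids are handed out on demand from a bitmap sized to what the hardware supports, with id 0 kept reserved. Below is a minimal, self-contained C sketch of that allocation pattern, for illustration only; the names (DID_BITMAP_BITS, did_alloc, did_free) are not the patch's actual interfaces, the real code sizes the bitmap from cap_ndoms(iommu->cap), and it guards the search-and-set with domid_bitmap_lock, which this sketch omits.

    /* Sketch of a domain-id bitmap allocator (illustrative names).
     * The patch reserves id 0 up front, exactly as set_bit(0, domid_bitmap)
     * does in iommu_setup(). */
    #include <stdint.h>
    #include <string.h>

    #define DID_BITMAP_BITS 16      /* e.g. a 4-bit domain-id implementation */
    #define BITS_PER_WORD   (8 * sizeof(unsigned long))

    static unsigned long did_bitmap[(DID_BITMAP_BITS + BITS_PER_WORD - 1) /
                                    BITS_PER_WORD];

    static void did_init(void)
    {
        memset(did_bitmap, 0, sizeof(did_bitmap));
        did_bitmap[0] |= 1UL;       /* id 0 is reserved, never handed out */
    }

    /* Find, claim, and return the first free id; -1 if all ids are taken. */
    static int did_alloc(void)
    {
        for ( unsigned int id = 1; id < DID_BITMAP_BITS; id++ )
        {
            unsigned long *word = &did_bitmap[id / BITS_PER_WORD];
            unsigned long mask = 1UL << (id % BITS_PER_WORD);
            if ( !(*word & mask) )
            {
                *word |= mask;
                return (int)id;
            }
        }
        return -1;                  /* bitmap exhausted */
    }

    /* Return an id to the pool, e.g. at domain teardown. */
    static void did_free(unsigned int id)
    {
        did_bitmap[id / BITS_PER_WORD] &= ~(1UL << (id % BITS_PER_WORD));
    }

One design point worth noting: the sketch reports exhaustion explicitly, whereas the patch's context_set_domain_id() does not check whether find_first_zero_bit() ran off the end of the bitmap; in practice the id is released in iommu_domain_teardown(), so ids are only held by live domains with assigned devices.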
-rw-r--r--  xen/arch/x86/hvm/vmx/vtd/intel-iommu.c     | 75
-rw-r--r--  xen/include/asm-x86/hvm/iommu.h            |  1
-rw-r--r--  xen/include/asm-x86/hvm/vmx/intel-iommu.h  |  2
3 files changed, 60 insertions(+), 18 deletions(-)
diff --git a/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c b/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c
index 8cd2098036..8a773c559e 100644
--- a/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c
+++ b/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c
@@ -35,11 +35,49 @@
#include "pci_regs.h"
#include "msi.h"
+#define domain_iommu_domid(d) ((d)->arch.hvm_domain.hvm_iommu.iommu_domid)
+
#define VTDPREFIX
extern void print_iommu_regs(struct acpi_drhd_unit *drhd);
extern void print_vtd_entries(struct domain *d, int bus, int devfn,
unsigned long gmfn);
+static spinlock_t domid_bitmap_lock; /* protect domain id bitmap */
+static int domid_bitmap_size; /* domain id bitmap size in bit */
+static void *domid_bitmap; /* iommu domain id bitmap */
+
+#define DID_FIELD_WIDTH 16
+#define DID_HIGH_OFFSET 8
+static void context_set_domain_id(struct context_entry *context,
+ struct domain *d)
+{
+ unsigned long flags;
+ domid_t iommu_domid = domain_iommu_domid(d);
+
+ if ( iommu_domid == 0 )
+ {
+ spin_lock_irqsave(&domid_bitmap_lock, flags);
+ iommu_domid = find_first_zero_bit(domid_bitmap, domid_bitmap_size);
+ set_bit(iommu_domid, domid_bitmap);
+ spin_unlock_irqrestore(&domid_bitmap_lock, flags);
+ d->arch.hvm_domain.hvm_iommu.iommu_domid = iommu_domid;
+ }
+
+ context->hi &= (1 << DID_HIGH_OFFSET) - 1;
+ context->hi |= iommu_domid << DID_HIGH_OFFSET;
+}
+
+static void iommu_domid_release(struct domain *d)
+{
+ domid_t iommu_domid = domain_iommu_domid(d);
+
+ if ( iommu_domid != 0 )
+ {
+ d->arch.hvm_domain.hvm_iommu.iommu_domid = 0;
+ clear_bit(iommu_domid, domid_bitmap);
+ }
+}
+
unsigned int x86_clflush_size;
void clflush_cache_range(void *adr, int size)
{
@@ -276,9 +314,6 @@ static int __iommu_flush_context(
unsigned long flag;
unsigned long start_time;
- /* Domain id in context is 1 based */
- did++;
-
/*
* In the non-present entry flush case, if hardware doesn't cache
* non-present entry we do nothing and if hardware cache non-present
@@ -363,9 +398,6 @@ static int __iommu_flush_iotlb(struct iommu *iommu, u16 did,
unsigned long flag;
unsigned long start_time;
- /* Domain id in context is 1 based */
- did++;
-
/*
* In the non-present entry flush case, if hardware doesn't cache
* non-present entry we do nothing and if hardware cache non-present
@@ -534,7 +566,8 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
{
iommu = drhd->iommu;
if ( cap_caching_mode(iommu->cap) )
- iommu_flush_iotlb_psi(iommu, domain->domain_id, addr, 1, 0);
+ iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
+ addr, 1, 0);
else if (cap_rwbf(iommu->cap))
iommu_flush_write_buffer(iommu);
}
@@ -1036,7 +1069,7 @@ static int domain_context_mapping_one(
* domain_id 0 is not valid on Intel's IOMMU, force domain_id to
* be 1 based as required by intel's iommu hw.
*/
- context_set_domain_id(*context, domain->domain_id);
+ context_set_domain_id(context, domain);
context_set_address_width(*context, hd->agaw);
if ( ecap_pass_thru(iommu->ecap) )
@@ -1069,12 +1102,12 @@ static int domain_context_mapping_one(
bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
context->hi, context->lo, hd->pgd);
- if ( iommu_flush_context_device(iommu, domain->domain_id,
+ if ( iommu_flush_context_device(iommu, domain_iommu_domid(domain),
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT, 1) )
iommu_flush_write_buffer(iommu);
else
- iommu_flush_iotlb_dsi(iommu, domain->domain_id, 0);
+ iommu_flush_iotlb_dsi(iommu, domain_iommu_domid(domain), 0);
spin_unlock_irqrestore(&iommu->lock, flags);
return ret;
}
@@ -1414,6 +1447,8 @@ void iommu_domain_teardown(struct domain *d)
if ( list_empty(&acpi_drhd_units) )
return;
+ iommu_domid_release(d);
+
#if CONFIG_PAGING_LEVELS == 3
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
@@ -1492,7 +1527,7 @@ int iommu_map_page(struct domain *d, paddr_t gfn, paddr_t mfn)
{
iommu = drhd->iommu;
if ( cap_caching_mode(iommu->cap) )
- iommu_flush_iotlb_psi(iommu, d->domain_id,
+ iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
gfn << PAGE_SHIFT_4K, 1, 0);
else if ( cap_rwbf(iommu->cap) )
iommu_flush_write_buffer(iommu);
@@ -1556,7 +1591,8 @@ int iommu_page_mapping(struct domain *domain, dma_addr_t iova,
{
iommu = drhd->iommu;
if ( cap_caching_mode(iommu->cap) )
- iommu_flush_iotlb_psi(iommu, domain->domain_id, iova, index, 0);
+ iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
+ iova, index, 0);
else if ( cap_rwbf(iommu->cap) )
iommu_flush_write_buffer(iommu);
}
@@ -1581,7 +1617,7 @@ void iommu_flush(struct domain *d, dma_addr_t gfn, u64 *p2m_entry)
{
iommu = drhd->iommu;
if ( cap_caching_mode(iommu->cap) )
- iommu_flush_iotlb_psi(iommu, d->domain_id,
+ iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
gfn << PAGE_SHIFT_4K, 1, 0);
else if ( cap_rwbf(iommu->cap) )
iommu_flush_write_buffer(iommu);
@@ -1760,6 +1796,7 @@ int iommu_setup(void)
if ( !vtd_enabled )
return 0;
+ spin_lock_init(&domid_bitmap_lock);
INIT_LIST_HEAD(&hd->pdev_list);
/* start from scratch */
@@ -1768,12 +1805,18 @@ int iommu_setup(void)
/* setup clflush size */
x86_clflush_size = ((cpuid_ebx(1) >> 8) & 0xff) * 8;
- /*
- * allocate IO page directory page for the domain.
- */
+ /* Allocate IO page directory page for the domain. */
drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
iommu = drhd->iommu;
+ /* Allocate domain id bitmap, and set bit 0 as reserved */
+ domid_bitmap_size = cap_ndoms(iommu->cap);
+ domid_bitmap = xmalloc_bytes(domid_bitmap_size / 8);
+ if ( domid_bitmap == NULL )
+ goto error;
+ memset(domid_bitmap, 0, domid_bitmap_size / 8);
+ set_bit(0, domid_bitmap);
+
/* setup 1:1 page table for dom0 */
for ( i = 0; i < max_page; i++ )
iommu_map_page(dom0, i, i);
diff --git a/xen/include/asm-x86/hvm/iommu.h b/xen/include/asm-x86/hvm/iommu.h
index 44b3374488..210530947f 100644
--- a/xen/include/asm-x86/hvm/iommu.h
+++ b/xen/include/asm-x86/hvm/iommu.h
@@ -42,6 +42,7 @@ struct hvm_iommu {
spinlock_t mapping_lock; /* io page table lock */
int agaw; /* adjusted guest address width, 0 is level 2 30-bit */
struct list_head g2m_ioport_list; /* guest to machine ioport mapping */
+ domid_t iommu_domid; /* domain id stored in iommu */
/* amd iommu support */
int domain_id;
diff --git a/xen/include/asm-x86/hvm/vmx/intel-iommu.h b/xen/include/asm-x86/hvm/vmx/intel-iommu.h
index 8fd63ce63e..f755598b61 100644
--- a/xen/include/asm-x86/hvm/vmx/intel-iommu.h
+++ b/xen/include/asm-x86/hvm/vmx/intel-iommu.h
@@ -227,8 +227,6 @@ struct context_entry {
do {(c).lo &= 0xfff; (c).lo |= (val) & PAGE_MASK_4K ;} while(0)
#define context_set_address_width(c, val) \
do {(c).hi &= 0xfffffff8; (c).hi |= (val) & 7;} while(0)
-#define context_set_domain_id(c, val) \
- do {(c).hi &= 0xff; (c).hi |= ((val + 1) & ((1 << 16) - 1)) << 8;} while(0)
#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while(0)
/* page table handling */