 xen/drivers/passthrough/amd/iommu_acpi.c      | 32
 xen/drivers/passthrough/amd/iommu_detect.c    | 18
 xen/drivers/passthrough/amd/iommu_init.c      | 94
 xen/drivers/passthrough/amd/iommu_intr.c      | 85
 xen/drivers/passthrough/amd/iommu_map.c       |  4
 xen/drivers/passthrough/amd/pci_amd_iommu.c   | 42
 xen/include/asm-x86/amd-iommu.h               |  7
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h | 10
 8 files changed, 189 insertions(+), 103 deletions(-)
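
This patch threads a PCI segment (domain) number through the AMD IOMMU code. The single global ivrs_mappings[] array becomes one table per segment, kept in a radix tree keyed by segment number; find_iommu_for_device() and get_dma_requestor_id() grow a seg parameter; and the per-IO-APIC ioapic_bdf[] array widens into ioapic_sbdf[], recording segment and BDF together. IVMD exclusion-range records carry no segment information yet, so those paths pin segment 0 for now (the /* XXX */ markers below).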
diff --git a/xen/drivers/passthrough/amd/iommu_acpi.c b/xen/drivers/passthrough/amd/iommu_acpi.c
index 332537953b..a26914b6e9 100644
--- a/xen/drivers/passthrough/amd/iommu_acpi.c
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c
@@ -30,6 +30,7 @@ static unsigned short __initdata last_bdf;
static void __init add_ivrs_mapping_entry(
u16 bdf, u16 alias_id, u8 flags, struct amd_iommu *iommu)
{
+ struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(iommu->seg);
u8 sys_mgt, lint1_pass, lint0_pass, nmi_pass, ext_int_pass, init_pass;
ASSERT( ivrs_mappings != NULL );
@@ -118,9 +119,10 @@ static void __init reserve_iommu_exclusion_range_all(
}
static void __init reserve_unity_map_for_device(
- u16 bdf, unsigned long base,
+ u16 seg, u16 bdf, unsigned long base,
unsigned long length, u8 iw, u8 ir)
{
+ struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
unsigned long old_top, new_top;
/* need to extend unity-mapped range? */
@@ -147,6 +149,7 @@ static void __init reserve_unity_map_for_device(
static int __init register_exclusion_range_for_all_devices(
unsigned long base, unsigned long limit, u8 iw, u8 ir)
{
+ int seg = 0; /* XXX */
unsigned long range_top, iommu_top, length;
struct amd_iommu *iommu;
u16 bdf;
@@ -163,7 +166,7 @@ static int __init register_exclusion_range_for_all_devices(
/* reserve r/w unity-mapped page entries for devices */
/* note: these entries are part of the exclusion range */
for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
- reserve_unity_map_for_device(bdf, base, length, iw, ir);
+ reserve_unity_map_for_device(seg, bdf, base, length, iw, ir);
/* push 'base' just outside of virtual address space */
base = iommu_top;
}
@@ -180,11 +183,13 @@ static int __init register_exclusion_range_for_all_devices(
static int __init register_exclusion_range_for_device(
u16 bdf, unsigned long base, unsigned long limit, u8 iw, u8 ir)
{
+ int seg = 0; /* XXX */
+ struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
unsigned long range_top, iommu_top, length;
struct amd_iommu *iommu;
u16 req;
- iommu = find_iommu_for_device(bdf);
+ iommu = find_iommu_for_device(seg, bdf);
if ( !iommu )
{
AMD_IOMMU_DEBUG("IVMD Error: No IOMMU for Dev_Id 0x%x!\n", bdf);
@@ -202,8 +207,8 @@ static int __init register_exclusion_range_for_device(
length = range_top - base;
/* reserve unity-mapped page entries for device */
/* note: these entries are part of the exclusion range */
- reserve_unity_map_for_device(bdf, base, length, iw, ir);
- reserve_unity_map_for_device(req, base, length, iw, ir);
+ reserve_unity_map_for_device(seg, bdf, base, length, iw, ir);
+ reserve_unity_map_for_device(seg, req, base, length, iw, ir);
/* push 'base' just outside of virtual address space */
base = iommu_top;
@@ -240,11 +245,13 @@ static int __init register_exclusion_range_for_iommu_devices(
/* note: these entries are part of the exclusion range */
for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
{
- if ( iommu == find_iommu_for_device(bdf) )
+ if ( iommu == find_iommu_for_device(iommu->seg, bdf) )
{
- reserve_unity_map_for_device(bdf, base, length, iw, ir);
- req = ivrs_mappings[bdf].dte_requestor_id;
- reserve_unity_map_for_device(req, base, length, iw, ir);
+ reserve_unity_map_for_device(iommu->seg, bdf, base, length,
+ iw, ir);
+ req = get_ivrs_mappings(iommu->seg)[bdf].dte_requestor_id;
+ reserve_unity_map_for_device(iommu->seg, req, base, length,
+ iw, ir);
}
}
@@ -627,7 +634,7 @@ static u16 __init parse_ivhd_device_extended_range(
}
static u16 __init parse_ivhd_device_special(
- union acpi_ivhd_device *ivhd_device,
+ union acpi_ivhd_device *ivhd_device, u16 seg,
u16 header_length, u16 block_length, struct amd_iommu *iommu)
{
u16 dev_length, bdf;
@@ -648,7 +655,8 @@ static u16 __init parse_ivhd_device_special(
add_ivrs_mapping_entry(bdf, bdf, ivhd_device->header.flags, iommu);
/* set device id of ioapic */
- ioapic_bdf[ivhd_device->special.handle] = bdf;
+ ioapic_sbdf[ivhd_device->special.handle].bdf = bdf;
+ ioapic_sbdf[ivhd_device->special.handle].seg = seg;
return dev_length;
}
@@ -729,7 +737,7 @@ static int __init parse_ivhd_block(struct acpi_ivhd_block_header *ivhd_block)
break;
case AMD_IOMMU_ACPI_IVHD_DEV_SPECIAL:
dev_length = parse_ivhd_device_special(
- ivhd_device,
+ ivhd_device, ivhd_block->pci_segment,
ivhd_block->header.length, block_length, iommu);
break;
default:
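
The IO-APIC handling above pairs with the interrupt remapping setup further down; a condensed sketch of the producer/consumer relationship introduced here, using only identifiers from the patch:

    /* IVHD parsing records which (segment, BDF) owns each IO-APIC: */
    ioapic_sbdf[ivhd_device->special.handle].bdf = bdf;
    ioapic_sbdf[ivhd_device->special.handle].seg = seg;

    /* interrupt remapping setup later resolves the owning IOMMU: */
    bdf = ioapic_sbdf[IO_APIC_ID(apic)].bdf;
    seg = ioapic_sbdf[IO_APIC_ID(apic)].seg;
    iommu = find_iommu_for_device(seg, bdf);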
diff --git a/xen/drivers/passthrough/amd/iommu_detect.c b/xen/drivers/passthrough/amd/iommu_detect.c
index 164a6aeb9a..7b6e932160 100644
--- a/xen/drivers/passthrough/amd/iommu_detect.c
+++ b/xen/drivers/passthrough/amd/iommu_detect.c
@@ -27,8 +27,8 @@
#include <asm/hvm/svm/amd-iommu-proto.h>
#include <asm/hvm/svm/amd-iommu-acpi.h>
-static int __init get_iommu_msi_capabilities(u8 bus, u8 dev, u8 func,
- struct amd_iommu *iommu)
+static int __init get_iommu_msi_capabilities(
+ u16 seg, u8 bus, u8 dev, u8 func, struct amd_iommu *iommu)
{
int cap_ptr, cap_id;
u32 cap_header;
@@ -66,8 +66,8 @@ static int __init get_iommu_msi_capabilities(u8 bus, u8 dev, u8 func,
return 0;
}
-static int __init get_iommu_capabilities(u8 bus, u8 dev, u8 func, u8 cap_ptr,
- struct amd_iommu *iommu)
+static int __init get_iommu_capabilities(
+ u16 seg, u8 bus, u8 dev, u8 func, u8 cap_ptr, struct amd_iommu *iommu)
{
u32 cap_header, cap_range, misc_info;
@@ -121,6 +121,11 @@ int __init amd_iommu_detect_one_acpi(void *ivhd)
spin_lock_init(&iommu->lock);
+ iommu->seg = ivhd_block->pci_segment;
+ if ( alloc_ivrs_mappings(ivhd_block->pci_segment) )
+ {
+ xfree(iommu);
+ return -ENOMEM;
+ }
iommu->bdf = ivhd_block->header.dev_id;
iommu->cap_offset = ivhd_block->cap_offset;
iommu->mmio_base_phys = ivhd_block->mmio_base;
@@ -147,8 +152,9 @@ int __init amd_iommu_detect_one_acpi(void *ivhd)
bus = iommu->bdf >> 8;
dev = PCI_SLOT(iommu->bdf & 0xFF);
func = PCI_FUNC(iommu->bdf & 0xFF);
- get_iommu_capabilities(bus, dev, func, iommu->cap_offset, iommu);
- get_iommu_msi_capabilities(bus, dev, func, iommu);
+ get_iommu_capabilities(iommu->seg, bus, dev, func,
+ iommu->cap_offset, iommu);
+ get_iommu_msi_capabilities(iommu->seg, bus, dev, func, iommu);
list_add_tail(&iommu->list, &amd_iommu_head);
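
Note that alloc_ivrs_mappings() is idempotent per segment (it returns 0 immediately when a table for that segment already exists; see iommu_init.c below), so calling it once per IVHD block is safe even when several IOMMUs share a segment; only a genuine allocation failure reaches the xfree()/-ENOMEM path above.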
diff --git a/xen/drivers/passthrough/amd/iommu_init.c b/xen/drivers/passthrough/amd/iommu_init.c
index b9742f1763..8a5e22c5aa 100644
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -32,7 +32,7 @@
static int __initdata nr_amd_iommus;
unsigned short ivrs_bdf_entries;
-struct ivrs_mappings *ivrs_mappings;
+static struct radix_tree_root ivrs_maps;
struct list_head amd_iommu_head;
struct table_struct device_table;
@@ -692,7 +692,6 @@ error_out:
static void __init amd_iommu_init_cleanup(void)
{
struct amd_iommu *iommu, *next;
- int bdf;
/* free amd iommu list */
list_for_each_entry_safe ( iommu, next, &amd_iommu_head, list )
@@ -708,40 +707,84 @@ static void __init amd_iommu_init_cleanup(void)
}
/* free interrupt remapping table */
- for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
- {
- if ( ivrs_mappings[bdf].intremap_table )
- amd_iommu_free_intremap_table(bdf);
- }
+ iterate_ivrs_entries(amd_iommu_free_intremap_table);
/* free device table */
deallocate_iommu_table_struct(&device_table);
/* free ivrs_mappings[] */
- if ( ivrs_mappings )
- {
- xfree(ivrs_mappings);
- ivrs_mappings = NULL;
- }
+ radix_tree_destroy(&ivrs_maps, xfree);
iommu_enabled = 0;
iommu_passthrough = 0;
iommu_intremap = 0;
}
-static int __init init_ivrs_mapping(void)
+/*
+ * We allocate an extra array element to store the segment number
+ * (and in the future perhaps other global information).
+ */
+#define IVRS_MAPPINGS_SEG(m) m[ivrs_bdf_entries].dte_requestor_id
+
+struct ivrs_mappings *get_ivrs_mappings(u16 seg)
+{
+ return radix_tree_lookup(&ivrs_maps, seg);
+}
+
+int iterate_ivrs_mappings(int (*handler)(u16 seg, struct ivrs_mappings *))
+{
+ u16 seg = 0;
+ int rc = 0;
+
+ do {
+ struct ivrs_mappings *map;
+
+ if ( !radix_tree_gang_lookup(&ivrs_maps, (void **)&map, seg, 1) )
+ break;
+ seg = IVRS_MAPPINGS_SEG(map);
+ rc = handler(seg, map);
+ } while ( !rc && ++seg );
+
+ return rc;
+}
+
+int iterate_ivrs_entries(int (*handler)(u16 seg, struct ivrs_mappings *))
{
+ u16 seg = 0;
+ int rc = 0;
+
+ do {
+ struct ivrs_mappings *map;
+ int bdf;
+
+ if ( !radix_tree_gang_lookup(&ivrs_maps, (void **)&map, seg, 1) )
+ break;
+ seg = IVRS_MAPPINGS_SEG(map);
+ for ( bdf = 0; !rc && bdf < ivrs_bdf_entries; ++bdf )
+ rc = handler(seg, map + bdf);
+ } while ( !rc && ++seg );
+
+ return rc;
+}
+
+int __init alloc_ivrs_mappings(u16 seg)
+{
+ struct ivrs_mappings *ivrs_mappings;
int bdf;
BUG_ON( !ivrs_bdf_entries );
- ivrs_mappings = xmalloc_array( struct ivrs_mappings, ivrs_bdf_entries);
+ if ( get_ivrs_mappings(seg) )
+ return 0;
+
+ ivrs_mappings = xmalloc_array(struct ivrs_mappings, ivrs_bdf_entries + 1);
if ( ivrs_mappings == NULL )
{
AMD_IOMMU_DEBUG("Error allocating IVRS Mappings table\n");
return -ENOMEM;
}
memset(ivrs_mappings, 0, ivrs_bdf_entries * sizeof(struct ivrs_mappings));
+ IVRS_MAPPINGS_SEG(ivrs_mappings) = seg;
/* assign default values for device entries */
for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
@@ -763,10 +806,14 @@ static int __init init_ivrs_mapping(void)
if ( amd_iommu_perdev_intremap )
spin_lock_init(&ivrs_mappings[bdf].intremap_lock);
}
+
+ radix_tree_insert(&ivrs_maps, seg, ivrs_mappings);
+
return 0;
}
-static int __init amd_iommu_setup_device_table(void)
+static int __init amd_iommu_setup_device_table(
+ u16 seg, struct ivrs_mappings *ivrs_mappings)
{
int bdf;
void *intr_tb, *dte;
@@ -832,7 +879,8 @@ int __init amd_iommu_init(void)
if ( !ivrs_bdf_entries )
goto error_out;
- if ( init_ivrs_mapping() != 0 )
+ radix_tree_init(&ivrs_maps);
+ if ( alloc_ivrs_mappings(0) != 0 )
goto error_out;
if ( amd_iommu_update_ivrs_mapping_acpi() != 0 )
@@ -843,7 +891,7 @@ int __init amd_iommu_init(void)
goto error_out;
/* allocate and initialize a global device table shared by all iommus */
- if ( amd_iommu_setup_device_table() != 0 )
+ if ( iterate_ivrs_mappings(amd_iommu_setup_device_table) != 0 )
goto error_out;
/* per iommu initialization */
@@ -888,7 +936,8 @@ static void invalidate_all_domain_pages(void)
amd_iommu_flush_all_pages(d);
}
-static void invalidate_all_devices(void)
+static int _invalidate_all_devices(
+ u16 seg, struct ivrs_mappings *ivrs_mappings)
{
int bdf, req_id;
unsigned long flags;
@@ -896,7 +945,7 @@ static void invalidate_all_devices(void)
for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
{
- iommu = find_iommu_for_device(bdf);
+ iommu = find_iommu_for_device(seg, bdf);
req_id = ivrs_mappings[bdf].dte_requestor_id;
if ( iommu )
{
@@ -907,6 +956,13 @@ static void invalidate_all_devices(void)
spin_unlock_irqrestore(&iommu->lock, flags);
}
}
+
+ return 0;
+}
+
+static void invalidate_all_devices(void)
+{
+ iterate_ivrs_mappings(_invalidate_all_devices);
}
void amd_iommu_suspend(void)
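
Both iterators depend on the spare array element: each table is allocated with ivrs_bdf_entries + 1 entries, and IVRS_MAPPINGS_SEG() stashes the owning segment number in the spare entry's otherwise unused dte_requestor_id field, which is how a gang lookup that starts at an arbitrary index recovers the segment it actually landed on. The handler contract is: take (seg, mappings), return 0 to continue, non-zero to abort the walk and propagate the error. A minimal sketch of a hypothetical handler (the function and counter are illustrative, not part of the patch):

    static unsigned int nr_intremap_tables; /* hypothetical counter */

    static int count_intremap_tables(u16 seg, struct ivrs_mappings *entry)
    {
        /* iterate_ivrs_entries() hands over one per-BDF entry at a time */
        if ( entry->intremap_table )
            ++nr_intremap_tables;
        return 0; /* 0 == keep walking */
    }

    /* walks every (segment, BDF) pair: */
    iterate_ivrs_entries(count_intremap_tables);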
diff --git a/xen/drivers/passthrough/amd/iommu_intr.c b/xen/drivers/passthrough/amd/iommu_intr.c
index 4e6efa096f..70d83d9088 100644
--- a/xen/drivers/passthrough/amd/iommu_intr.c
+++ b/xen/drivers/passthrough/amd/iommu_intr.c
@@ -27,21 +27,21 @@
#define INTREMAP_LENGTH 0xB
#define INTREMAP_ENTRIES (1 << INTREMAP_LENGTH)
-int ioapic_bdf[MAX_IO_APICS];
+struct ioapic_sbdf ioapic_sbdf[MAX_IO_APICS];
void *shared_intremap_table;
static DEFINE_SPINLOCK(shared_intremap_lock);
-static spinlock_t* get_intremap_lock(int req_id)
+static spinlock_t* get_intremap_lock(int seg, int req_id)
{
return (amd_iommu_perdev_intremap ?
- &ivrs_mappings[req_id].intremap_lock:
+ &get_ivrs_mappings(seg)[req_id].intremap_lock:
&shared_intremap_lock);
}
-static int get_intremap_requestor_id(int bdf)
+static int get_intremap_requestor_id(int seg, int bdf)
{
ASSERT( bdf < ivrs_bdf_entries );
- return ivrs_mappings[bdf].dte_requestor_id;
+ return get_ivrs_mappings(seg)[bdf].dte_requestor_id;
}
static int get_intremap_offset(u8 vector, u8 dm)
@@ -53,20 +53,20 @@ static int get_intremap_offset(u8 vector, u8 dm)
return offset;
}
-static u8 *get_intremap_entry(int bdf, int offset)
+static u8 *get_intremap_entry(int seg, int bdf, int offset)
{
u8 *table;
- table = (u8*)ivrs_mappings[bdf].intremap_table;
+ table = (u8*)get_ivrs_mappings(seg)[bdf].intremap_table;
ASSERT( (table != NULL) && (offset < INTREMAP_ENTRIES) );
return (u8*) (table + offset);
}
-static void free_intremap_entry(int bdf, int offset)
+static void free_intremap_entry(int seg, int bdf, int offset)
{
u32* entry;
- entry = (u32*)get_intremap_entry(bdf, offset);
+ entry = (u32*)get_intremap_entry(seg, bdf, offset);
memset(entry, 0, sizeof(u32));
}
@@ -125,8 +125,8 @@ static void update_intremap_entry_from_ioapic(
spinlock_t *lock;
int offset;
- req_id = get_intremap_requestor_id(bdf);
- lock = get_intremap_lock(req_id);
+ req_id = get_intremap_requestor_id(iommu->seg, bdf);
+ lock = get_intremap_lock(iommu->seg, req_id);
delivery_mode = rte->delivery_mode;
vector = rte->vector;
@@ -136,7 +136,7 @@ static void update_intremap_entry_from_ioapic(
spin_lock_irqsave(lock, flags);
offset = get_intremap_offset(vector, delivery_mode);
- entry = (u32*)get_intremap_entry(req_id, offset);
+ entry = (u32*)get_intremap_entry(iommu->seg, req_id, offset);
update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
spin_unlock_irqrestore(lock, flags);
@@ -157,7 +157,7 @@ int __init amd_iommu_setup_ioapic_remapping(void)
u32* entry;
int apic, pin;
u8 delivery_mode, dest, vector, dest_mode;
- u16 bdf, req_id;
+ u16 seg, bdf, req_id;
struct amd_iommu *iommu;
spinlock_t *lock;
int offset;
@@ -174,17 +174,18 @@ int __init amd_iommu_setup_ioapic_remapping(void)
continue;
/* get device id of ioapic devices */
- bdf = ioapic_bdf[IO_APIC_ID(apic)];
- iommu = find_iommu_for_device(bdf);
+ bdf = ioapic_sbdf[IO_APIC_ID(apic)].bdf;
+ seg = ioapic_sbdf[IO_APIC_ID(apic)].seg;
+ iommu = find_iommu_for_device(seg, bdf);
if ( !iommu )
{
AMD_IOMMU_DEBUG("Fail to find iommu for ioapic "
- "device id = 0x%x\n", bdf);
+ "device id = %04x:%04x\n", seg, bdf);
continue;
}
- req_id = get_intremap_requestor_id(bdf);
- lock = get_intremap_lock(req_id);
+ req_id = get_intremap_requestor_id(iommu->seg, bdf);
+ lock = get_intremap_lock(iommu->seg, req_id);
delivery_mode = rte.delivery_mode;
vector = rte.vector;
@@ -193,7 +194,7 @@ int __init amd_iommu_setup_ioapic_remapping(void)
spin_lock_irqsave(lock, flags);
offset = get_intremap_offset(vector, delivery_mode);
- entry = (u32*)get_intremap_entry(req_id, offset);
+ entry = (u32*)get_intremap_entry(iommu->seg, req_id, offset);
update_intremap_entry(entry, vector,
delivery_mode, dest_mode, dest);
spin_unlock_irqrestore(lock, flags);
@@ -216,7 +217,7 @@ void amd_iommu_ioapic_update_ire(
struct IO_APIC_route_entry old_rte = { 0 };
struct IO_APIC_route_entry new_rte = { 0 };
unsigned int rte_lo = (reg & 1) ? reg - 1 : reg;
- int saved_mask, bdf;
+ int saved_mask, seg, bdf;
struct amd_iommu *iommu;
if ( !iommu_intremap )
@@ -226,12 +227,13 @@ void amd_iommu_ioapic_update_ire(
}
/* get device id of ioapic devices */
- bdf = ioapic_bdf[IO_APIC_ID(apic)];
- iommu = find_iommu_for_device(bdf);
+ bdf = ioapic_sbdf[IO_APIC_ID(apic)].bdf;
+ seg = ioapic_sbdf[IO_APIC_ID(apic)].seg;
+ iommu = find_iommu_for_device(seg, bdf);
if ( !iommu )
{
- AMD_IOMMU_DEBUG("Fail to find iommu for ioapic device id = 0x%x\n",
- bdf);
+ AMD_IOMMU_DEBUG("Fail to find iommu for ioapic device id ="
+ " %04x:%04x\n", seg, bdf);
__io_apic_write(apic, reg, value);
return;
}
@@ -289,28 +291,28 @@ static void update_intremap_entry_from_msi_msg(
int offset;
bdf = (pdev->bus << 8) | pdev->devfn;
- req_id = get_dma_requestor_id(bdf);
- alias_id = get_intremap_requestor_id(bdf);
+ req_id = get_dma_requestor_id(pdev->seg, bdf);
+ alias_id = get_intremap_requestor_id(pdev->seg, bdf);
if ( msg == NULL )
{
- lock = get_intremap_lock(req_id);
+ lock = get_intremap_lock(iommu->seg, req_id);
spin_lock_irqsave(lock, flags);
- free_intremap_entry(req_id, msi_desc->remap_index);
+ free_intremap_entry(iommu->seg, req_id, msi_desc->remap_index);
spin_unlock_irqrestore(lock, flags);
if ( ( req_id != alias_id ) &&
- ivrs_mappings[alias_id].intremap_table != NULL )
+ get_ivrs_mappings(pdev->seg)[alias_id].intremap_table != NULL )
{
- lock = get_intremap_lock(alias_id);
+ lock = get_intremap_lock(iommu->seg, alias_id);
spin_lock_irqsave(lock, flags);
- free_intremap_entry(alias_id, msi_desc->remap_index);
+ free_intremap_entry(iommu->seg, alias_id, msi_desc->remap_index);
spin_unlock_irqrestore(lock, flags);
}
goto done;
}
- lock = get_intremap_lock(req_id);
+ lock = get_intremap_lock(iommu->seg, req_id);
spin_lock_irqsave(lock, flags);
dest_mode = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
@@ -320,7 +322,7 @@ static void update_intremap_entry_from_msi_msg(
offset = get_intremap_offset(vector, delivery_mode);
msi_desc->remap_index = offset;
- entry = (u32*)get_intremap_entry(req_id, offset);
+ entry = (u32*)get_intremap_entry(iommu->seg, req_id, offset);
update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
spin_unlock_irqrestore(lock, flags);
@@ -331,12 +333,12 @@ static void update_intremap_entry_from_msi_msg(
* devices.
*/
- lock = get_intremap_lock(alias_id);
+ lock = get_intremap_lock(iommu->seg, alias_id);
if ( ( req_id != alias_id ) &&
- ivrs_mappings[alias_id].intremap_table != NULL )
+ get_ivrs_mappings(pdev->seg)[alias_id].intremap_table != NULL )
{
spin_lock_irqsave(lock, flags);
- entry = (u32*)get_intremap_entry(alias_id, offset);
+ entry = (u32*)get_intremap_entry(iommu->seg, alias_id, offset);
update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
spin_unlock_irqrestore(lock, flags);
}
@@ -362,7 +364,7 @@ void amd_iommu_msi_msg_update_ire(
if ( !iommu_intremap )
return;
- iommu = find_iommu_for_device((pdev->bus << 8) | pdev->devfn);
+ iommu = find_iommu_for_device(pdev->seg, (pdev->bus << 8) | pdev->devfn);
if ( !iommu )
{
@@ -379,15 +381,18 @@ void amd_iommu_read_msi_from_ire(
{
}
-void __init amd_iommu_free_intremap_table(int bdf)
+int __init amd_iommu_free_intremap_table(
+ u16 seg, struct ivrs_mappings *ivrs_mapping)
{
- void *tb = ivrs_mappings[bdf].intremap_table;
+ void *tb = ivrs_mapping->intremap_table;
if ( tb )
{
__free_amd_iommu_tables(tb, INTREMAP_TABLE_ORDER);
- ivrs_mappings[bdf].intremap_table = NULL;
+ ivrs_mapping->intremap_table = NULL;
}
+
+ return 0;
}
void* __init amd_iommu_alloc_intremap_table(void)
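
Throughout this file pdev->seg and iommu->seg are interchangeable once the IOMMU has been looked up by the device's segment, which appears to be why the lookups can mix the two. The lock selection itself, condensed from get_intremap_lock() above:

    /* per-device remapping: the lock lives in the segment's IVRS entry;
     * shared remapping: one global table guarded by one global lock. */
    spinlock_t *lock = amd_iommu_perdev_intremap
                       ? &get_ivrs_mappings(seg)[req_id].intremap_lock
                       : &shared_intremap_lock;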
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 6a8619c0ef..1402806c5f 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -719,8 +719,8 @@ static int update_paging_mode(struct domain *d, unsigned long gfn)
for_each_pdev( d, pdev )
{
bdf = (pdev->bus << 8) | pdev->devfn;
- req_id = get_dma_requestor_id(bdf);
- iommu = find_iommu_for_device(bdf);
+ req_id = get_dma_requestor_id(pdev->seg, bdf);
+ iommu = find_iommu_for_device(pdev->seg, bdf);
if ( !iommu )
{
AMD_IOMMU_DEBUG("%s Fail to find iommu.\n", __func__);
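
For reference, the BDF packing used here and in the next file is the usual one (PCI_SLOT()/PCI_FUNC() are the standard Xen macros):

    u16 bdf  = (pdev->bus << 8) | pdev->devfn;  /* bus in the high byte */
    u8  bus  = bdf >> 8;
    u8  dev  = PCI_SLOT(bdf & 0xff);            /* bits 7:3 of devfn */
    u8  func = PCI_FUNC(bdf & 0xff);            /* bits 2:0 of devfn */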
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 9736251cb1..14a3f489fa 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -29,10 +29,12 @@
extern bool_t __read_mostly opt_irq_perdev_vector_map;
extern bool_t __read_mostly iommu_amd_perdev_vector_map;
-struct amd_iommu *find_iommu_for_device(int bdf)
+struct amd_iommu *find_iommu_for_device(int seg, int bdf)
{
+ struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
+
BUG_ON ( bdf >= ivrs_bdf_entries );
- return ivrs_mappings[bdf].iommu;
+ return ivrs_mappings ? ivrs_mappings[bdf].iommu : NULL;
}
/*
@@ -43,8 +45,9 @@ struct amd_iommu *find_iommu_for_device(int bdf)
* Return original device id, if device has valid interrupt remapping
* table setup for both select entry and alias entry.
*/
-int get_dma_requestor_id(u16 bdf)
+int get_dma_requestor_id(u16 seg, u16 bdf)
{
+ struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
int req_id;
BUG_ON ( bdf >= ivrs_bdf_entries );
@@ -95,7 +98,7 @@ static void amd_iommu_setup_domain_device(
valid = 0;
/* get device-table entry */
- req_id = get_dma_requestor_id(bdf);
+ req_id = get_dma_requestor_id(iommu->seg, bdf);
dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
spin_lock_irqsave(&iommu->lock, flags);
@@ -139,7 +142,7 @@ static void __init amd_iommu_setup_dom0_devices(struct domain *d)
list_add(&pdev->domain_list, &d->arch.pdev_list);
bdf = (bus << 8) | devfn;
- iommu = find_iommu_for_device(bdf);
+ iommu = find_iommu_for_device(pdev->seg, bdf);
if ( likely(iommu != NULL) )
amd_iommu_setup_domain_device(d, iommu, bdf);
@@ -287,7 +290,7 @@ static void amd_iommu_disable_domain_device(
int req_id;
BUG_ON ( iommu->dev_table.buffer == NULL );
- req_id = get_dma_requestor_id(bdf);
+ req_id = get_dma_requestor_id(iommu->seg, bdf);
dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
spin_lock_irqsave(&iommu->lock, flags);
@@ -318,12 +321,12 @@ static int reassign_device( struct domain *source, struct domain *target,
return -ENODEV;
bdf = (bus << 8) | devfn;
- iommu = find_iommu_for_device(bdf);
+ iommu = find_iommu_for_device(seg, bdf);
if ( !iommu )
{
AMD_IOMMU_DEBUG("Fail to find iommu."
- " %02x:%x02.%x cannot be assigned to domain %d\n",
- bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
+ " %04x:%02x:%x02.%x cannot be assigned to dom%d\n",
+ seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
target->domain_id);
return -ENODEV;
}
@@ -339,8 +342,8 @@ static int reassign_device( struct domain *source, struct domain *target,
allocate_domain_resources(t);
amd_iommu_setup_domain_device(target, iommu, bdf);
- AMD_IOMMU_DEBUG("Re-assign %02x:%02x.%x from domain %d to domain %d\n",
- bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
+ AMD_IOMMU_DEBUG("Re-assign %04x:%02x:%02x.%u from dom%d to dom%d\n",
+ seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
source->domain_id, target->domain_id);
return 0;
@@ -348,8 +351,9 @@ static int reassign_device( struct domain *source, struct domain *target,
static int amd_iommu_assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
{
+ struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
int bdf = (bus << 8) | devfn;
- int req_id = get_dma_requestor_id(bdf);
+ int req_id = get_dma_requestor_id(seg, bdf);
if ( ivrs_mappings[req_id].unity_map_enable )
{
@@ -439,12 +443,12 @@ static int amd_iommu_add_device(struct pci_dev *pdev)
return -EINVAL;
bdf = (pdev->bus << 8) | pdev->devfn;
- iommu = find_iommu_for_device(bdf);
+ iommu = find_iommu_for_device(pdev->seg, bdf);
if ( !iommu )
{
AMD_IOMMU_DEBUG("Fail to find iommu."
- " %02x:%02x.%x cannot be assigned to domain %d\n",
- pdev->bus, PCI_SLOT(pdev->devfn),
+ " %04x:%02x:%02x.%u cannot be assigned to dom%d\n",
+ pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn), pdev->domain->domain_id);
return -ENODEV;
}
@@ -461,12 +465,12 @@ static int amd_iommu_remove_device(struct pci_dev *pdev)
return -EINVAL;
bdf = (pdev->bus << 8) | pdev->devfn;
- iommu = find_iommu_for_device(bdf);
+ iommu = find_iommu_for_device(pdev->seg, bdf);
if ( !iommu )
{
AMD_IOMMU_DEBUG("Fail to find iommu."
- " %02x:%02x.%x cannot be removed from domain %d\n",
- pdev->bus, PCI_SLOT(pdev->devfn),
+ " %04x:%02x:%02x.%u cannot be removed from dom%d\n",
+ pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn), pdev->domain->domain_id);
return -ENODEV;
}
@@ -480,7 +484,7 @@ static int amd_iommu_group_id(u16 seg, u8 bus, u8 devfn)
int rt;
int bdf = (bus << 8) | devfn;
rt = ( bdf < ivrs_bdf_entries ) ?
- get_dma_requestor_id(bdf) :
+ get_dma_requestor_id(seg, bdf) :
bdf;
return rt;
}
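
With per-segment tables, find_iommu_for_device() can now legitimately be asked about a segment that has no IVRS table at all, hence the NULL-safe lookup above in place of an unconditional dereference. Every caller already follows the same pattern:

    struct amd_iommu *iommu =
        find_iommu_for_device(pdev->seg, (pdev->bus << 8) | pdev->devfn);

    if ( !iommu )
        return -ENODEV; /* device not behind any known IOMMU */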
diff --git a/xen/include/asm-x86/amd-iommu.h b/xen/include/asm-x86/amd-iommu.h
index 784c9d989b..ea40310c78 100644
--- a/xen/include/asm-x86/amd-iommu.h
+++ b/xen/include/asm-x86/amd-iommu.h
@@ -40,6 +40,7 @@ struct amd_iommu {
struct list_head list;
spinlock_t lock; /* protect iommu */
+ u16 seg;
u16 bdf;
u8 cap_offset;
u8 revision;
@@ -101,6 +102,10 @@ struct ivrs_mappings {
};
extern unsigned short ivrs_bdf_entries;
-extern struct ivrs_mappings *ivrs_mappings;
+
+int alloc_ivrs_mappings(u16 seg);
+struct ivrs_mappings *get_ivrs_mappings(u16 seg);
+int iterate_ivrs_mappings(int (*)(u16 seg, struct ivrs_mappings *));
+int iterate_ivrs_entries(int (*)(u16 seg, struct ivrs_mappings *));
#endif /* _ASM_X86_64_AMD_IOMMU_H */
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index 1444e9104c..a6e600b588 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -65,7 +65,7 @@ int amd_iommu_reserve_domain_unity_map(struct domain *domain,
void amd_iommu_share_p2m(struct domain *d);
/* device table functions */
-int get_dma_requestor_id(u16 bdf);
+int get_dma_requestor_id(u16 seg, u16 bdf);
void amd_iommu_add_dev_table_entry(
u32 *dte, u8 sys_mgt, u8 dev_ex, u8 lint1_pass, u8 lint0_pass,
u8 nmi_pass, u8 ext_int_pass, u8 init_pass);
@@ -80,12 +80,12 @@ int send_iommu_command(struct amd_iommu *iommu, u32 cmd[]);
void flush_command_buffer(struct amd_iommu *iommu);
/* find iommu for bdf */
-struct amd_iommu *find_iommu_for_device(int bdf);
+struct amd_iommu *find_iommu_for_device(int seg, int bdf);
/* interrupt remapping */
int amd_iommu_setup_ioapic_remapping(void);
void *amd_iommu_alloc_intremap_table(void);
-void amd_iommu_free_intremap_table(int bdf);
+int amd_iommu_free_intremap_table(u16 seg, struct ivrs_mappings *);
void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id);
void amd_iommu_ioapic_update_ire(
unsigned int apic, unsigned int reg, unsigned int value);
@@ -94,7 +94,9 @@ void amd_iommu_msi_msg_update_ire(
void amd_iommu_read_msi_from_ire(
struct msi_desc *msi_desc, struct msi_msg *msg);
-extern int ioapic_bdf[MAX_IO_APICS];
+extern struct ioapic_sbdf {
+ u16 bdf, seg;
+} ioapic_sbdf[MAX_IO_APICS];
extern void *shared_intremap_table;
/* power management support */
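
A side effect of the new amd_iommu_free_intremap_table() signature is that it now matches the iterate_ivrs_entries() handler type exactly, which is what lets the cleanup loop in iommu_init.c collapse into a single call:

    /* from amd_iommu_init_cleanup(): free every per-BDF interrupt
     * remapping table, across all segments, in one pass */
    iterate_ivrs_entries(amd_iommu_free_intremap_table);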