author     Keir Fraser <keir.fraser@citrix.com>    2008-04-21 10:01:46 +0100
committer  Keir Fraser <keir.fraser@citrix.com>    2008-04-21 10:01:46 +0100
commit     46163648717d370e5ed7890a43a5c92d1f220e0f (patch)
tree       53b42fe2f2d56eb44be70268b6321dafada29285
parent     93f92fcf9c8ef07c4e8faff20f1cbfedc5296d3b (diff)
amd iommu: clean up debug information.

Replace the open-coded dprintk()/gdprintk() calls in the AMD IOMMU code with
amd_iov_info(), amd_iov_warning() and amd_iov_error() wrappers that are only
compiled in when AMD_IOV_DEBUG is defined, rename amd_iommu_detect() to
amd_iov_detect(), rename the "enable_amd_iommu" boot parameter to
"enable_amd_iov", and have iommu_setup() dispatch to the Intel VT-d or AMD
setup path based on the CPU vendor.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
 xen/arch/x86/setup.c                          |   2
 xen/drivers/passthrough/amd/iommu_acpi.c      | 180
 xen/drivers/passthrough/amd/iommu_detect.c    |   5
 xen/drivers/passthrough/amd/iommu_init.c      |  15
 xen/drivers/passthrough/amd/iommu_map.c       |  21
 xen/drivers/passthrough/amd/pci_amd_iommu.c   |  35
 xen/drivers/passthrough/iommu.c               |  15
 xen/drivers/passthrough/vtd/iommu.c           |   2
 xen/include/asm-x86/amd-iommu.h               |   2
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |  13
 10 files changed, 140 insertions(+), 150 deletions(-)
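
For reference, a minimal standalone sketch of how the new amd_iov_* logging
wrappers introduced below behave. This is not Xen code: printk() and the
XENLOG_* level prefixes are stood in for by plain printf() strings so the
snippet builds on its own, and AMD_IOV_DEBUG is assumed to be defined at
build time (the patch itself does not show where that happens).

    /* Standalone illustration of the amd_iov_* wrappers (not Xen code). */
    #include <stdio.h>

    #define AMD_IOV_DEBUG   /* assumed build-time switch; remove to compile the messages away */

    #ifdef AMD_IOV_DEBUG
    #define amd_iov_info(fmt, args...) \
        printf("(XEN) [INFO] AMD_IOV: " fmt, ## args)
    #define amd_iov_warning(fmt, args...) \
        printf("(XEN) [WARN] AMD_IOV: " fmt, ## args)
    #define amd_iov_error(fmt, args...) \
        printf("(XEN) [ERR] AMD_IOV: %s:%d: " fmt, __FILE__, __LINE__, ## args)
    #else
    #define amd_iov_info(fmt, args...)
    #define amd_iov_warning(fmt, args...)
    #define amd_iov_error(fmt, args...)
    #endif

    int main(void)
    {
        unsigned int bdf = 0x23;    /* hypothetical device id for illustration */

        amd_iov_info("IVHD Block:\n");
        amd_iov_error("IVMD Error: Invalid Dev_Id 0x%x\n", bdf);
        return 0;
    }

With AMD_IOV_DEBUG left undefined, all three macros expand to nothing, which
is how the patch compiles the debug output away; amd_iov_error() additionally
prepends the file name and line number of the call site.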
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 9b025b51b1..3f271890fb 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1021,8 +1021,6 @@ void __init __start_xen(unsigned long mbi_p)
iommu_setup();
- amd_iommu_detect();
-
/*
* We're going to setup domain0 using the module(s) that we stashed safely
* above our heap. The second module, if present, is an initrd ramdisk.
diff --git a/xen/drivers/passthrough/amd/iommu_acpi.c b/xen/drivers/passthrough/amd/iommu_acpi.c
index ff803c4def..ba0880f1f0 100644
--- a/xen/drivers/passthrough/amd/iommu_acpi.c
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c
@@ -139,7 +139,7 @@ static int __init register_exclusion_range_for_device(
iommu = find_iommu_for_device(bus, devfn);
if ( !iommu )
{
- dprintk(XENLOG_ERR, "IVMD Error: No IOMMU for Dev_Id 0x%x!\n", bdf);
+ amd_iov_error("IVMD Error: No IOMMU for Dev_Id 0x%x!\n", bdf);
return -ENODEV;
}
req = ivrs_mappings[bdf].dte_requestor_id;
@@ -221,7 +221,7 @@ static int __init parse_ivmd_device_select(
bdf = ivmd_block->header.dev_id;
if ( bdf >= ivrs_bdf_entries )
{
- dprintk(XENLOG_ERR, "IVMD Error: Invalid Dev_Id 0x%x\n", bdf);
+ amd_iov_error("IVMD Error: Invalid Dev_Id 0x%x\n", bdf);
return -ENODEV;
}
@@ -238,22 +238,19 @@ static int __init parse_ivmd_device_range(
first_bdf = ivmd_block->header.dev_id;
if ( first_bdf >= ivrs_bdf_entries )
{
- dprintk(XENLOG_ERR, "IVMD Error: "
- "Invalid Range_First Dev_Id 0x%x\n", first_bdf);
+ amd_iov_error(
+ "IVMD Error: Invalid Range_First Dev_Id 0x%x\n", first_bdf);
return -ENODEV;
}
last_bdf = ivmd_block->last_dev_id;
if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
{
- dprintk(XENLOG_ERR, "IVMD Error: "
- "Invalid Range_Last Dev_Id 0x%x\n", last_bdf);
+ amd_iov_error(
+ "IVMD Error: Invalid Range_Last Dev_Id 0x%x\n", last_bdf);
return -ENODEV;
}
- dprintk(XENLOG_ERR, " Dev_Id Range: 0x%x -> 0x%x\n",
- first_bdf, last_bdf);
-
for ( bdf = first_bdf, error = 0; (bdf <= last_bdf) && !error; bdf++ )
error = register_exclusion_range_for_device(
bdf, base, limit, iw, ir);
@@ -272,8 +269,7 @@ static int __init parse_ivmd_device_iommu(
ivmd_block->cap_offset);
if ( !iommu )
{
- dprintk(XENLOG_ERR,
- "IVMD Error: No IOMMU for Dev_Id 0x%x Cap 0x%x\n",
+ amd_iov_error("IVMD Error: No IOMMU for Dev_Id 0x%x Cap 0x%x\n",
ivmd_block->header.dev_id, ivmd_block->cap_offset);
return -ENODEV;
}
@@ -290,7 +286,7 @@ static int __init parse_ivmd_block(struct acpi_ivmd_block_header *ivmd_block)
if ( ivmd_block->header.length <
sizeof(struct acpi_ivmd_block_header) )
{
- dprintk(XENLOG_ERR, "IVMD Error: Invalid Block Length!\n");
+ amd_iov_error("IVMD Error: Invalid Block Length!\n");
return -ENODEV;
}
@@ -299,10 +295,9 @@ static int __init parse_ivmd_block(struct acpi_ivmd_block_header *ivmd_block)
base = start_addr & PAGE_MASK;
limit = (start_addr + mem_length - 1) & PAGE_MASK;
- dprintk(XENLOG_INFO, "IVMD Block: Type 0x%x\n",
- ivmd_block->header.type);
- dprintk(XENLOG_INFO, " Start_Addr_Phys 0x%lx\n", start_addr);
- dprintk(XENLOG_INFO, " Mem_Length 0x%lx\n", mem_length);
+ amd_iov_info("IVMD Block: Type 0x%x\n",ivmd_block->header.type);
+ amd_iov_info(" Start_Addr_Phys 0x%lx\n", start_addr);
+ amd_iov_info(" Mem_Length 0x%lx\n", mem_length);
if ( get_field_from_byte(ivmd_block->header.flags,
AMD_IOMMU_ACPI_EXCLUSION_RANGE_MASK,
@@ -321,7 +316,7 @@ static int __init parse_ivmd_block(struct acpi_ivmd_block_header *ivmd_block)
}
else
{
- dprintk(KERN_ERR, "IVMD Error: Invalid Flag Field!\n");
+ amd_iov_error("IVMD Error: Invalid Flag Field!\n");
return -ENODEV;
}
@@ -344,7 +339,7 @@ static int __init parse_ivmd_block(struct acpi_ivmd_block_header *ivmd_block)
base, limit, iw, ir);
default:
- dprintk(XENLOG_ERR, "IVMD Error: Invalid Block Type!\n");
+ amd_iov_error("IVMD Error: Invalid Block Type!\n");
return -ENODEV;
}
}
@@ -354,7 +349,7 @@ static u16 __init parse_ivhd_device_padding(
{
if ( header_length < (block_length + pad_length) )
{
- dprintk(XENLOG_ERR, "IVHD Error: Invalid Device_Entry Length!\n");
+ amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
return 0;
}
@@ -369,8 +364,7 @@ static u16 __init parse_ivhd_device_select(
bdf = ivhd_device->header.dev_id;
if ( bdf >= ivrs_bdf_entries )
{
- dprintk(XENLOG_ERR, "IVHD Error: "
- "Invalid Device_Entry Dev_Id 0x%x\n", bdf);
+ amd_iov_error("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf);
return 0;
}
@@ -393,14 +387,14 @@ static u16 __init parse_ivhd_device_range(
dev_length = sizeof(struct acpi_ivhd_device_range);
if ( header_length < (block_length + dev_length) )
{
- dprintk(XENLOG_ERR, "IVHD Error: Invalid Device_Entry Length!\n");
+ amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
return 0;
}
if ( ivhd_device->range.trailer.type !=
AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
{
- dprintk(XENLOG_ERR, "IVHD Error: "
+ amd_iov_error("IVHD Error: "
"Invalid Range: End_Type 0x%x\n",
ivhd_device->range.trailer.type);
return 0;
@@ -409,21 +403,20 @@ static u16 __init parse_ivhd_device_range(
first_bdf = ivhd_device->header.dev_id;
if ( first_bdf >= ivrs_bdf_entries )
{
- dprintk(XENLOG_ERR, "IVHD Error: "
- "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
+ amd_iov_error(
+ "IVHD Error: Invalid Range: First Dev_Id 0x%x\n", first_bdf);
return 0;
}
last_bdf = ivhd_device->range.trailer.dev_id;
if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
{
- dprintk(XENLOG_ERR, "IVHD Error: "
- "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
+ amd_iov_error(
+ "IVHD Error: Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
return 0;
}
- dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
- first_bdf, last_bdf);
+ amd_iov_info(" Dev_Id Range: 0x%x -> 0x%x\n", first_bdf, last_bdf);
/* override flags for range of devices */
sys_mgt = get_field_from_byte(ivhd_device->header.flags,
@@ -444,28 +437,25 @@ static u16 __init parse_ivhd_device_alias(
dev_length = sizeof(struct acpi_ivhd_device_alias);
if ( header_length < (block_length + dev_length) )
{
- dprintk(XENLOG_ERR, "IVHD Error: "
- "Invalid Device_Entry Length!\n");
+ amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
return 0;
}
bdf = ivhd_device->header.dev_id;
if ( bdf >= ivrs_bdf_entries )
{
- dprintk(XENLOG_ERR, "IVHD Error: "
- "Invalid Device_Entry Dev_Id 0x%x\n", bdf);
+ amd_iov_error("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf);
return 0;
}
alias_id = ivhd_device->alias.dev_id;
if ( alias_id >= ivrs_bdf_entries )
{
- dprintk(XENLOG_ERR, "IVHD Error: "
- "Invalid Alias Dev_Id 0x%x\n", alias_id);
+ amd_iov_error("IVHD Error: Invalid Alias Dev_Id 0x%x\n", alias_id);
return 0;
}
- dprintk(XENLOG_INFO, " Dev_Id Alias: 0x%x\n", alias_id);
+ amd_iov_info(" Dev_Id Alias: 0x%x\n", alias_id);
/* override requestor_id and flags for device */
ivrs_mappings[bdf].dte_requestor_id = alias_id;
@@ -490,15 +480,14 @@ static u16 __init parse_ivhd_device_alias_range(
dev_length = sizeof(struct acpi_ivhd_device_alias_range);
if ( header_length < (block_length + dev_length) )
{
- dprintk(XENLOG_ERR, "IVHD Error: "
- "Invalid Device_Entry Length!\n");
+ amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
return 0;
}
if ( ivhd_device->alias_range.trailer.type !=
AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
{
- dprintk(XENLOG_ERR, "IVHD Error: "
+ amd_iov_error("IVHD Error: "
"Invalid Range: End_Type 0x%x\n",
ivhd_device->alias_range.trailer.type);
return 0;
@@ -507,30 +496,28 @@ static u16 __init parse_ivhd_device_alias_range(
first_bdf = ivhd_device->header.dev_id;
if ( first_bdf >= ivrs_bdf_entries )
{
- dprintk(XENLOG_ERR,"IVHD Error: "
- "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
+ amd_iov_error(
+ "IVHD Error: Invalid Range: First Dev_Id 0x%x\n", first_bdf);
return 0;
}
last_bdf = ivhd_device->alias_range.trailer.dev_id;
if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
{
- dprintk(XENLOG_ERR, "IVHD Error: "
- "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
+ amd_iov_error(
+ "IVHD Error: Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
return 0;
}
alias_id = ivhd_device->alias_range.alias.dev_id;
if ( alias_id >= ivrs_bdf_entries )
{
- dprintk(XENLOG_ERR, "IVHD Error: "
- "Invalid Alias Dev_Id 0x%x\n", alias_id);
+ amd_iov_error("IVHD Error: Invalid Alias Dev_Id 0x%x\n", alias_id);
return 0;
}
- dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
- first_bdf, last_bdf);
- dprintk(XENLOG_INFO, " Dev_Id Alias: 0x%x\n", alias_id);
+ amd_iov_info(" Dev_Id Range: 0x%x -> 0x%x\n", first_bdf, last_bdf);
+ amd_iov_info(" Dev_Id Alias: 0x%x\n", alias_id);
/* override requestor_id and flags for range of devices */
sys_mgt = get_field_from_byte(ivhd_device->header.flags,
@@ -555,16 +542,14 @@ static u16 __init parse_ivhd_device_extended(
dev_length = sizeof(struct acpi_ivhd_device_extended);
if ( header_length < (block_length + dev_length) )
{
- dprintk(XENLOG_ERR, "IVHD Error: "
- "Invalid Device_Entry Length!\n");
+ amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
return 0;
}
bdf = ivhd_device->header.dev_id;
if ( bdf >= ivrs_bdf_entries )
{
- dprintk(XENLOG_ERR, "IVHD Error: "
- "Invalid Device_Entry Dev_Id 0x%x\n", bdf);
+ amd_iov_error("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf);
return 0;
}
@@ -587,15 +572,14 @@ static u16 __init parse_ivhd_device_extended_range(
dev_length = sizeof(struct acpi_ivhd_device_extended_range);
if ( header_length < (block_length + dev_length) )
{
- dprintk(XENLOG_ERR, "IVHD Error: "
- "Invalid Device_Entry Length!\n");
+ amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
return 0;
}
if ( ivhd_device->extended_range.trailer.type !=
AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
{
- dprintk(XENLOG_ERR, "IVHD Error: "
+ amd_iov_error("IVHD Error: "
"Invalid Range: End_Type 0x%x\n",
ivhd_device->extended_range.trailer.type);
return 0;
@@ -604,20 +588,20 @@ static u16 __init parse_ivhd_device_extended_range(
first_bdf = ivhd_device->header.dev_id;
if ( first_bdf >= ivrs_bdf_entries )
{
- dprintk(XENLOG_ERR, "IVHD Error: "
- "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
+ amd_iov_error(
+ "IVHD Error: Invalid Range: First Dev_Id 0x%x\n", first_bdf);
return 0;
}
last_bdf = ivhd_device->extended_range.trailer.dev_id;
if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
{
- dprintk(XENLOG_ERR, "IVHD Error: "
- "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
+ amd_iov_error(
+ "IVHD Error: Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
return 0;
}
- dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
+ amd_iov_info(" Dev_Id Range: 0x%x -> 0x%x\n",
first_bdf, last_bdf);
/* override flags for range of devices */
@@ -639,7 +623,7 @@ static int __init parse_ivhd_block(struct acpi_ivhd_block_header *ivhd_block)
if ( ivhd_block->header.length <
sizeof(struct acpi_ivhd_block_header) )
{
- dprintk(XENLOG_ERR, "IVHD Error: Invalid Block Length!\n");
+ amd_iov_error("IVHD Error: Invalid Block Length!\n");
return -ENODEV;
}
@@ -647,21 +631,16 @@ static int __init parse_ivhd_block(struct acpi_ivhd_block_header *ivhd_block)
ivhd_block->cap_offset);
if ( !iommu )
{
- dprintk(XENLOG_ERR,
- "IVHD Error: No IOMMU for Dev_Id 0x%x Cap 0x%x\n",
+ amd_iov_error("IVHD Error: No IOMMU for Dev_Id 0x%x Cap 0x%x\n",
ivhd_block->header.dev_id, ivhd_block->cap_offset);
return -ENODEV;
}
- dprintk(XENLOG_INFO, "IVHD Block:\n");
- dprintk(XENLOG_INFO, " Cap_Offset 0x%x\n",
- ivhd_block->cap_offset);
- dprintk(XENLOG_INFO, " MMIO_BAR_Phys 0x%lx\n",
- (unsigned long)ivhd_block->mmio_base);
- dprintk(XENLOG_INFO, " PCI_Segment 0x%x\n",
- ivhd_block->pci_segment);
- dprintk(XENLOG_INFO, " IOMMU_Info 0x%x\n",
- ivhd_block->iommu_info);
+ amd_iov_info("IVHD Block:\n");
+ amd_iov_info(" Cap_Offset 0x%x\n", ivhd_block->cap_offset);
+ amd_iov_info(" MMIO_BAR_Phys 0x%"PRIx64"\n",ivhd_block->mmio_base);
+ amd_iov_info( " PCI_Segment 0x%x\n", ivhd_block->pci_segment);
+ amd_iov_info( " IOMMU_Info 0x%x\n", ivhd_block->iommu_info);
/* override IOMMU support flags */
iommu->coherent = get_field_from_byte(ivhd_block->header.flags,
@@ -692,13 +671,10 @@ static int __init parse_ivhd_block(struct acpi_ivhd_block_header *ivhd_block)
ivhd_device = (union acpi_ivhd_device *)
((u8 *)ivhd_block + block_length);
- dprintk(XENLOG_INFO, "IVHD Device Entry:\n");
- dprintk(XENLOG_INFO, " Type 0x%x\n",
- ivhd_device->header.type);
- dprintk(XENLOG_INFO, " Dev_Id 0x%x\n",
- ivhd_device->header.dev_id);
- dprintk(XENLOG_INFO, " Flags 0x%x\n",
- ivhd_device->header.flags);
+ amd_iov_info( "IVHD Device Entry:\n");
+ amd_iov_info( " Type 0x%x\n", ivhd_device->header.type);
+ amd_iov_info( " Dev_Id 0x%x\n", ivhd_device->header.dev_id);
+ amd_iov_info( " Flags 0x%x\n", ivhd_device->header.flags);
switch ( ivhd_device->header.type )
{
@@ -741,8 +717,7 @@ static int __init parse_ivhd_block(struct acpi_ivhd_block_header *ivhd_block)
ivhd_block->header.length, block_length);
break;
default:
- dprintk(XENLOG_ERR, "IVHD Error: "
- "Invalid Device Type!\n");
+ amd_iov_error("IVHD Error: Invalid Device Type!\n");
dev_length = 0;
break;
}
@@ -774,46 +749,49 @@ static int __init parse_ivrs_block(struct acpi_ivrs_block_header *ivrs_block)
return parse_ivmd_block(ivmd_block);
default:
- dprintk(XENLOG_ERR, "IVRS Error: Invalid Block Type!\n");
+ amd_iov_error("IVRS Error: Invalid Block Type!\n");
return -ENODEV;
}
return 0;
}
-void __init dump_acpi_table_header(struct acpi_table_header *table)
+static void __init dump_acpi_table_header(struct acpi_table_header *table)
{
+#ifdef AMD_IOV_DEBUG
int i;
- printk(XENLOG_INFO "AMD IOMMU: ACPI Table:\n");
- printk(XENLOG_INFO " Signature ");
+ amd_iov_info("ACPI Table:\n");
+ amd_iov_info(" Signature ");
for ( i = 0; i < ACPI_NAME_SIZE; i++ )
printk("%c", table->signature[i]);
printk("\n");
- printk(" Length 0x%x\n", table->length);
- printk(" Revision 0x%x\n", table->revision);
- printk(" CheckSum 0x%x\n", table->checksum);
+ amd_iov_info(" Length 0x%x\n", table->length);
+ amd_iov_info(" Revision 0x%x\n", table->revision);
+ amd_iov_info(" CheckSum 0x%x\n", table->checksum);
- printk(" OEM_Id ");
+ amd_iov_info(" OEM_Id ");
for ( i = 0; i < ACPI_OEM_ID_SIZE; i++ )
printk("%c", table->oem_id[i]);
printk("\n");
- printk(" OEM_Table_Id ");
+ amd_iov_info(" OEM_Table_Id ");
for ( i = 0; i < ACPI_OEM_TABLE_ID_SIZE; i++ )
printk("%c", table->oem_table_id[i]);
printk("\n");
- printk(" OEM_Revision 0x%x\n", table->oem_revision);
+ amd_iov_info(" OEM_Revision 0x%x\n", table->oem_revision);
- printk(" Creator_Id ");
+ amd_iov_info(" Creator_Id ");
for ( i = 0; i < ACPI_NAME_SIZE; i++ )
printk("%c", table->asl_compiler_id[i]);
printk("\n");
- printk(" Creator_Revision 0x%x\n",
+ amd_iov_info(" Creator_Revision 0x%x\n",
table->asl_compiler_revision);
+#endif
+
}
int __init parse_ivrs_table(unsigned long phys_addr, unsigned long size)
@@ -827,9 +805,7 @@ int __init parse_ivrs_table(unsigned long phys_addr, unsigned long size)
BUG_ON(!table);
-#if 0
dump_acpi_table_header(table);
-#endif
/* validate checksum: sum of entire table == 0 */
checksum = 0;
@@ -838,7 +814,7 @@ int __init parse_ivrs_table(unsigned long phys_addr, unsigned long size)
checksum += raw_table[i];
if ( checksum )
{
- dprintk(XENLOG_ERR, "IVRS Error: "
+ amd_iov_error("IVRS Error: "
"Invalid Checksum 0x%x\n", checksum);
return -ENODEV;
}
@@ -850,15 +826,15 @@ int __init parse_ivrs_table(unsigned long phys_addr, unsigned long size)
ivrs_block = (struct acpi_ivrs_block_header *)
((u8 *)table + length);
- dprintk(XENLOG_INFO, "IVRS Block:\n");
- dprintk(XENLOG_INFO, " Type 0x%x\n", ivrs_block->type);
- dprintk(XENLOG_INFO, " Flags 0x%x\n", ivrs_block->flags);
- dprintk(XENLOG_INFO, " Length 0x%x\n", ivrs_block->length);
- dprintk(XENLOG_INFO, " Dev_Id 0x%x\n", ivrs_block->dev_id);
+ amd_iov_info("IVRS Block:\n");
+ amd_iov_info(" Type 0x%x\n", ivrs_block->type);
+ amd_iov_info(" Flags 0x%x\n", ivrs_block->flags);
+ amd_iov_info(" Length 0x%x\n", ivrs_block->length);
+ amd_iov_info(" Dev_Id 0x%x\n", ivrs_block->dev_id);
if ( table->length < (length + ivrs_block->length) )
{
- dprintk(XENLOG_ERR, "IVRS Error: "
+ amd_iov_error("IVRS Error: "
"Table Length Exceeded: 0x%x -> 0x%lx\n",
table->length,
(length + ivrs_block->length));
diff --git a/xen/drivers/passthrough/amd/iommu_detect.c b/xen/drivers/passthrough/amd/iommu_detect.c
index 8d67324d61..68d04a66a2 100644
--- a/xen/drivers/passthrough/amd/iommu_detect.c
+++ b/xen/drivers/passthrough/amd/iommu_detect.c
@@ -117,7 +117,7 @@ static int __init get_iommu_msi_capabilities(u8 bus, u8 dev, u8 func,
if ( !iommu->msi_cap )
return -ENODEV;
- dprintk(XENLOG_INFO, "AMD IOMMU: Found MSI capability block \n");
+ amd_iov_info("Found MSI capability block \n");
control = pci_conf_read16(bus, dev, func,
iommu->msi_cap + PCI_MSI_FLAGS);
iommu->maskbit = control & PCI_MSI_FLAGS_MASKBIT;
@@ -138,8 +138,7 @@ int __init get_iommu_capabilities(u8 bus, u8 dev, u8 func, u8 cap_ptr,
if ( ((mmio_bar & 0x1) == 0) || (iommu->mmio_base_phys == 0) )
{
- dprintk(XENLOG_ERR ,
- "AMD IOMMU: Invalid MMIO_BAR = 0x%"PRIx64"\n", mmio_bar);
+ amd_iov_error("Invalid MMIO_BAR = 0x%"PRIx64"\n", mmio_bar);
return -ENODEV;
}
diff --git a/xen/drivers/passthrough/amd/iommu_init.c b/xen/drivers/passthrough/amd/iommu_init.c
index c2e967f087..8d4d713c81 100644
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -35,8 +35,7 @@ int __init map_iommu_mmio_region(struct amd_iommu *iommu)
if ( nr_amd_iommus > MAX_AMD_IOMMUS )
{
- gdprintk(XENLOG_ERR,
- "IOMMU: nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
+ amd_iov_error("nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
return -ENOMEM;
}
@@ -395,7 +394,7 @@ static void parse_event_log_entry(u32 entry[])
if ( (code > IOMMU_EVENT_INVALID_DEV_REQUEST)
|| (code < IOMMU_EVENT_ILLEGAL_DEV_TABLE_ENTRY) )
{
- dprintk(XENLOG_ERR, "Invalid event log entry!\n");
+ amd_iov_error("Invalid event log entry!\n");
return;
}
@@ -408,8 +407,8 @@ static void parse_event_log_entry(u32 entry[])
IOMMU_EVENT_DOMAIN_ID_MASK,
IOMMU_EVENT_DOMAIN_ID_SHIFT);
addr= (u64*) (entry + 2);
- dprintk(XENLOG_ERR,
- "%s: domain = %d, device id = 0x%x, fault address = 0x%"PRIx64"\n",
+ printk(XENLOG_ERR "AMD_IOV: "
+ "%s: domain:%d, device id:0x%x, fault address:0x%"PRIx64"\n",
event_str[code-1], domain_id, device_id, *addr);
}
}
@@ -445,7 +444,7 @@ static int set_iommu_interrupt_handler(struct amd_iommu *iommu)
if ( !vector )
{
- gdprintk(XENLOG_ERR, "AMD IOMMU: no vectors\n");
+ amd_iov_error("no vectors\n");
return 0;
}
@@ -453,7 +452,7 @@ static int set_iommu_interrupt_handler(struct amd_iommu *iommu)
ret = request_irq(vector, amd_iommu_page_fault, 0, "dmar", iommu);
if ( ret )
{
- gdprintk(XENLOG_ERR, "AMD IOMMU: can't request irq\n");
+ amd_iov_error("can't request irq\n");
return 0;
}
@@ -483,5 +482,5 @@ void __init enable_iommu(struct amd_iommu *iommu)
spin_unlock_irqrestore(&iommu->lock, flags);
- printk("AMD IOMMU %d: Enabled\n", nr_amd_iommus);
+ printk("AMD_IOV: IOMMU %d Enabled.\n", nr_amd_iommus);
}
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index aa584e615f..50c9ef66a7 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -154,8 +154,7 @@ void flush_command_buffer(struct amd_iommu *iommu)
}
else
{
- dprintk(XENLOG_WARNING, "AMD IOMMU: Warning:"
- " ComWaitInt bit did not assert!\n");
+ amd_iov_warning("Warning: ComWaitInt bit did not assert!\n");
}
}
}
@@ -402,10 +401,9 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
if ( pte == NULL )
{
- dprintk(XENLOG_ERR,
- "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
+ amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
spin_unlock_irqrestore(&hd->mapping_lock, flags);
- return -EIO;
+ return -EFAULT;
}
set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
@@ -439,10 +437,9 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
if ( pte == NULL )
{
- dprintk(XENLOG_ERR,
- "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
+ amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
spin_unlock_irqrestore(&hd->mapping_lock, flags);
- return -EIO;
+ return -EFAULT;
}
/* mark PTE as 'page not present' */
@@ -479,9 +476,8 @@ int amd_iommu_reserve_domain_unity_map(
hd->root_table, hd->paging_mode, phys_addr >> PAGE_SHIFT);
if ( pte == NULL )
{
- dprintk(XENLOG_ERR,
- "AMD IOMMU: Invalid IO pagetable entry "
- "phys_addr = %lx\n", phys_addr);
+ amd_iov_error(
+ "Invalid IO pagetable entry phys_addr = %lx\n", phys_addr);
spin_unlock_irqrestore(&hd->mapping_lock, flags);
return -EFAULT;
}
@@ -528,8 +524,7 @@ int amd_iommu_sync_p2m(struct domain *d)
pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
if ( pte == NULL )
{
- dprintk(XENLOG_ERR,
- "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
+ amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
spin_unlock_irqrestore(&hd->mapping_lock, flags);
return -EFAULT;
}
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index dda365e6ea..08d46e37e7 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -39,7 +39,7 @@ struct ivrs_mappings *ivrs_mappings = NULL;
int amd_iommu_enabled = 0;
static int enable_amd_iommu = 0;
-boolean_param("enable_amd_iommu", enable_amd_iommu);
+boolean_param("enable_amd_iov", enable_amd_iommu);
static void deallocate_domain_page_tables(struct hvm_iommu *hd)
{
@@ -104,7 +104,7 @@ static int __init allocate_iommu_table_struct(struct table_struct *table,
if ( !table->buffer )
{
- dprintk(XENLOG_ERR, "AMD IOMMU: Error allocating %s\n", name);
+ amd_iov_error("Error allocating %s\n", name);
return -ENOMEM;
}
@@ -169,7 +169,7 @@ int iommu_detect_callback(u8 bus, u8 dev, u8 func, u8 cap_ptr)
iommu = (struct amd_iommu *) xmalloc(struct amd_iommu);
if ( !iommu )
{
- dprintk(XENLOG_ERR, "AMD IOMMU: Error allocating amd_iommu\n");
+ amd_iov_error("Error allocating amd_iommu\n");
return -ENOMEM;
}
memset(iommu, 0, sizeof(struct amd_iommu));
@@ -237,7 +237,7 @@ static int __init amd_iommu_init(void)
}
if ( acpi_table_parse(ACPI_IVRS, parse_ivrs_table) != 0 )
- dprintk(XENLOG_INFO, "AMD IOMMU: Did not find IVRS table!\n");
+ amd_iov_error("Did not find IVRS table!\n");
for_each_amd_iommu ( iommu )
{
@@ -308,7 +308,7 @@ void amd_iommu_setup_domain_device(
invalidate_dev_table_entry(iommu, req_id);
flush_command_buffer(iommu);
- dprintk(XENLOG_INFO, "AMD IOMMU: Set DTE req_id:%x, "
+ amd_iov_info("Enable DTE:0x%x, "
"root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n",
req_id, root_ptr, hd->domain_id, hd->paging_mode);
@@ -354,7 +354,7 @@ void __init amd_iommu_setup_dom0_devices(void)
}
}
-int amd_iommu_detect(void)
+int amd_iov_detect(void)
{
unsigned long i;
int last_bus;
@@ -362,7 +362,7 @@ int amd_iommu_detect(void)
if ( !enable_amd_iommu )
{
- printk("AMD IOMMU: Disabled\n");
+ printk("AMD_IOV: Disabled.\n");
return 0;
}
@@ -370,13 +370,13 @@ int amd_iommu_detect(void)
if ( scan_for_iommu(iommu_detect_callback) != 0 )
{
- dprintk(XENLOG_ERR, "AMD IOMMU: Error detection\n");
+ amd_iov_error("Error detection\n");
goto error_out;
}
if ( !iommu_found() )
{
- printk("AMD IOMMU: Not found!\n");
+ printk("AMD_IOV: IOMMU not found!\n");
return 0;
}
else
@@ -394,8 +394,7 @@ int amd_iommu_detect(void)
if ( !ivrs_mappings )
{
- dprintk(XENLOG_ERR, "AMD IOMMU:"
- " Error allocating IVRS DevMappings table\n");
+ amd_iov_error("Error allocating IVRS DevMappings table\n");
goto error_out;
}
memset(ivrs_mappings, 0,
@@ -404,7 +403,7 @@ int amd_iommu_detect(void)
if ( amd_iommu_init() != 0 )
{
- dprintk(XENLOG_ERR, "AMD IOMMU: Error initialization\n");
+ amd_iov_error("Error initialization\n");
goto error_out;
}
@@ -462,8 +461,6 @@ static int get_paging_mode(unsigned long entries)
return -ENOMEM;
}
- dprintk(XENLOG_INFO, "AMD IOMMU: paging mode = %d\n", level);
-
return level;
}
@@ -505,7 +502,7 @@ static void amd_iommu_disable_domain_device(
memset (dte, 0, IOMMU_DEV_TABLE_ENTRY_SIZE);
invalidate_dev_table_entry(iommu, req_id);
flush_command_buffer(iommu);
- dprintk(XENLOG_INFO , "AMD IOMMU: disable DTE 0x%x,"
+ amd_iov_info("Disable DTE:0x%x,"
" domain_id:%d, paging_mode:%d\n",
req_id, domain_hvm_iommu(domain)->domain_id,
domain_hvm_iommu(domain)->paging_mode);
@@ -540,7 +537,7 @@ static int reassign_device( struct domain *source, struct domain *target,
if ( !iommu )
{
- gdprintk(XENLOG_ERR , "AMD IOMMU: fail to find iommu."
+ amd_iov_error("Fail to find iommu."
" %x:%x.%x cannot be assigned to domain %d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn), target->domain_id);
return -ENODEV;
@@ -555,8 +552,7 @@ static int reassign_device( struct domain *source, struct domain *target,
spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
amd_iommu_setup_domain_device(target, iommu, bdf);
- gdprintk(XENLOG_INFO ,
- "AMD IOMMU: reassign %x:%x.%x domain %d -> domain %d\n",
+ amd_iov_info("reassign %x:%x.%x domain %d -> domain %d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
source->domain_id, target->domain_id);
@@ -595,8 +591,7 @@ static void release_domain_devices(struct domain *d)
{
pdev = list_entry(hd->pdev_list.next, typeof(*pdev), list);
pdev_flr(pdev->bus, pdev->devfn);
- gdprintk(XENLOG_INFO ,
- "AMD IOMMU: release devices %x:%x.%x\n",
+ amd_iov_info("release domain %d devices %x:%x.%x\n", d->domain_id,
pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
reassign_device(d, dom0, pdev->bus, pdev->devfn);
}
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 2cf5551337..51a6e20b44 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -18,6 +18,8 @@
extern struct iommu_ops intel_iommu_ops;
extern struct iommu_ops amd_iommu_ops;
+int intel_vtd_setup(void);
+int amd_iov_detect(void);
int iommu_domain_init(struct domain *domain)
{
@@ -134,3 +136,16 @@ void deassign_device(struct domain *d, u8 bus, u8 devfn)
return hd->platform_ops->reassign_device(d, dom0, bus, devfn);
}
+
+int iommu_setup(void)
+{
+ switch ( boot_cpu_data.x86_vendor )
+ {
+ case X86_VENDOR_INTEL:
+ return intel_vtd_setup();
+ case X86_VENDOR_AMD:
+ return amd_iov_detect();
+ }
+
+ return 0;
+}
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index cfbfac94bd..d7498e4d35 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1903,7 +1903,7 @@ static void setup_dom0_rmrr(void)
end_for_each_rmrr_device ( rmrr, pdev )
}
-int iommu_setup(void)
+int intel_vtd_setup(void)
{
struct hvm_iommu *hd = domain_hvm_iommu(dom0);
struct acpi_drhd_unit *drhd;
diff --git a/xen/include/asm-x86/amd-iommu.h b/xen/include/asm-x86/amd-iommu.h
index f577cbc59c..f896088f8d 100644
--- a/xen/include/asm-x86/amd-iommu.h
+++ b/xen/include/asm-x86/amd-iommu.h
@@ -31,7 +31,7 @@
extern int amd_iommu_enabled;
extern struct list_head amd_iommu_head;
-extern int __init amd_iommu_detect(void);
+extern int __init amd_iov_detect(void);
struct table_struct {
void *buffer;
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index aeedd33878..646ab7047e 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -35,6 +35,19 @@
#define DMA_32BIT_MASK 0x00000000ffffffffULL
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
+#ifdef AMD_IOV_DEBUG
+#define amd_iov_info(fmt, args...) \
+ printk(XENLOG_INFO "AMD_IOV: " fmt, ## args)
+#define amd_iov_warning(fmt, args...) \
+ printk(XENLOG_WARNING "AMD_IOV: " fmt, ## args)
+#define amd_iov_error(fmt, args...) \
+ printk(XENLOG_ERR "AMD_IOV: %s:%d: " fmt, __FILE__ , __LINE__ , ## args)
+#else
+#define amd_iov_info(fmt, args...)
+#define amd_iov_warning(fmt, args...)
+#define amd_iov_error(fmt, args...)
+#endif
+
typedef int (*iommu_detect_callback_ptr_t)(
u8 bus, u8 dev, u8 func, u8 cap_ptr);