path: root/xen/arch/x86/hvm/vmsi.c
author    Jan Beulich <jbeulich@suse.com>  2012-01-23 09:35:17 +0000
committer Jan Beulich <jbeulich@suse.com>  2012-01-23 09:35:17 +0000
commit    74c213c506afcd74a8556dd092995fd4dc38b225 (patch)
tree      2d0e2eb788d9d710f0ac438a2ad63739bebf0565 /xen/arch/x86/hvm/vmsi.c
parent    9346f6cfbfb992b4fb38b802b794f194f1209dbf (diff)
x86/vMSI: miscellaneous fixes
This addresses a number of problems in msixtbl_{read,write}():

- address alignment was not checked, allowing for memory corruption in
  the hypervisor (write case) or returning of hypervisor private data
  to the guest (read case)
- the interrupt mask bit was permitted to be written by the guest
  (while Xen's interrupt flow control routines need to control it)
- MAX_MSIX_TABLE_{ENTRIES,PAGES} were pointlessly defined to plain
  numbers (making it unobvious why they have these values, and making
  the latter non-portable)
- MAX_MSIX_TABLE_PAGES was also off by one (failing to account for a
  non-zero table offset); this was also affecting host MSI-X code
- struct msixtbl_entry's table_flags[] was one element larger than
  necessary due to improper open-coding of BITS_TO_LONGS()
- msixtbl_read() unconditionally accessed the physical table, even
  though the data was only needed in a quarter of all cases
- various calculations were done unnecessarily for both of the rather
  distinct code paths in msixtbl_read()

Additionally it is unclear on what basis MAX_MSIX_ACC_ENTRIES was
chosen to be 3.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Committed-by: Keir Fraser <keir@xen.org>
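[Editorial note] The header-side limits mentioned above are not part of this
diffstat (it is limited to vmsi.c). The following is a small, self-contained
sketch, not the committed header change: the numeric values and macro shapes
are assumptions based on the PCI MSI-X limits the message alludes to. It shows
why BITS_TO_LONGS() needs no extra "+ 1" for table_flags[], and why a
maximally sized table can spill into one extra page when it starts at a
non-zero offset within its first page.

#include <stdio.h>

#define PAGE_SIZE              4096u
#define BITS_PER_LONG          (8 * sizeof(unsigned long))
#define PCI_MSIX_ENTRY_SIZE    16u     /* one MSI-X table entry is 16 bytes */

/* Hypothetical stand-ins mirroring the commit message's reasoning: */
#define MAX_MSIX_TABLE_ENTRIES 2048u   /* PCI caps an MSI-X table at 2048 entries */
#define BITS_TO_LONGS(bits)    (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
    /*
     * BITS_TO_LONGS() already rounds up, so the old open-coded
     * "/ BITS_PER_LONG + 1" allocated one unsigned long too many
     * for table_flags[].
     */
    printf("table_flags[] words: %zu\n",
           (size_t)BITS_TO_LONGS(MAX_MSIX_TABLE_ENTRIES));

    /*
     * A maximally sized table occupies 2048 * 16 = 32768 bytes.  If it
     * starts at a non-zero offset within its first page, it spills into
     * one extra page - the off-by-one the commit message points out.
     */
    unsigned int pages_aligned = (MAX_MSIX_TABLE_ENTRIES * PCI_MSIX_ENTRY_SIZE
                                  + PAGE_SIZE - 1) / PAGE_SIZE;
    printf("pages (offset 0): %u, worst case: %u\n",
           pages_aligned, pages_aligned + 1);
    return 0;
}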
Diffstat (limited to 'xen/arch/x86/hvm/vmsi.c')
-rw-r--r--  xen/arch/x86/hvm/vmsi.c  59
1 file changed, 33 insertions(+), 26 deletions(-)
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index 9ea861fca6..517b6b31f9 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -165,7 +165,7 @@ struct msixtbl_entry
struct pci_dev *pdev;
unsigned long gtable; /* gpa of msix table */
unsigned long table_len;
- unsigned long table_flags[MAX_MSIX_TABLE_ENTRIES / BITS_PER_LONG + 1];
+ unsigned long table_flags[BITS_TO_LONGS(MAX_MSIX_TABLE_ENTRIES)];
#define MAX_MSIX_ACC_ENTRIES 3
struct {
uint32_t msi_ad[3]; /* Shadow of address low, high and data */
@@ -192,17 +192,14 @@ static struct msixtbl_entry *msixtbl_find_entry(
static void __iomem *msixtbl_addr_to_virt(
struct msixtbl_entry *entry, unsigned long addr)
{
- int idx, nr_page;
+ unsigned int idx, nr_page;
- if ( !entry )
+ if ( !entry || !entry->pdev )
return NULL;
nr_page = (addr >> PAGE_SHIFT) -
(entry->gtable >> PAGE_SHIFT);
- if ( !entry->pdev )
- return NULL;
-
idx = entry->pdev->msix_table_idx[nr_page];
if ( !idx )
return NULL;
@@ -215,37 +212,34 @@ static int msixtbl_read(
struct vcpu *v, unsigned long address,
unsigned long len, unsigned long *pval)
{
- unsigned long offset, val;
+ unsigned long offset;
struct msixtbl_entry *entry;
void *virt;
- int nr_entry, index;
+ unsigned int nr_entry, index;
int r = X86EMUL_UNHANDLEABLE;
- rcu_read_lock(&msixtbl_rcu_lock);
+ if ( len != 4 || (address & 3) )
+ return r;
- if ( len != 4 )
- goto out;
+ rcu_read_lock(&msixtbl_rcu_lock);
entry = msixtbl_find_entry(v, address);
- virt = msixtbl_addr_to_virt(entry, address);
- if ( !virt )
- goto out;
-
- nr_entry = (address - entry->gtable) / PCI_MSIX_ENTRY_SIZE;
offset = address & (PCI_MSIX_ENTRY_SIZE - 1);
- if ( nr_entry >= MAX_MSIX_ACC_ENTRIES &&
- offset != PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET )
- goto out;
- val = readl(virt);
if ( offset != PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET )
{
+ nr_entry = (address - entry->gtable) / PCI_MSIX_ENTRY_SIZE;
+ if ( nr_entry >= MAX_MSIX_ACC_ENTRIES )
+ goto out;
index = offset / sizeof(uint32_t);
*pval = entry->gentries[nr_entry].msi_ad[index];
}
else
{
- *pval = val;
+ virt = msixtbl_addr_to_virt(entry, address);
+ if ( !virt )
+ goto out;
+ *pval = readl(virt);
}
r = X86EMUL_OKAY;
@@ -260,13 +254,13 @@ static int msixtbl_write(struct vcpu *v, unsigned long address,
unsigned long offset;
struct msixtbl_entry *entry;
void *virt;
- int nr_entry, index;
+ unsigned int nr_entry, index;
int r = X86EMUL_UNHANDLEABLE;
- rcu_read_lock(&msixtbl_rcu_lock);
+ if ( len != 4 || (address & 3) )
+ return r;
- if ( len != 4 )
- goto out;
+ rcu_read_lock(&msixtbl_rcu_lock);
entry = msixtbl_find_entry(v, address);
nr_entry = (address - entry->gtable) / PCI_MSIX_ENTRY_SIZE;
@@ -291,9 +285,22 @@ static int msixtbl_write(struct vcpu *v, unsigned long address,
if ( !virt )
goto out;
+ /* Do not allow the mask bit to be changed. */
+#if 0 /* XXX
+ * As the mask bit is the only defined bit in the word, and as the
+ * host MSI-X code doesn't preserve the other bits anyway, doing
+ * this is pointless. So for now just discard the write (also
+ * saving us from having to determine the matching irq_desc).
+ */
+ spin_lock_irqsave(&desc->lock, flags);
+ orig = readl(virt);
+ val &= ~PCI_MSIX_VECTOR_BITMASK;
+ val |= orig & PCI_MSIX_VECTOR_BITMASK;
writel(val, virt);
- r = X86EMUL_OKAY;
+ spin_unlock_irqrestore(&desc->lock, flags);
+#endif
+ r = X86EMUL_OKAY;
out:
rcu_read_unlock(&msixtbl_rcu_lock);
return r;
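[Editorial note] As an aid to following the reorganized read path, here is an
illustrative, stand-alone classification of guest accesses mirroring the
checks the patched msixtbl_read() performs up front. The classify() helper,
main(), and the base address are hypothetical; the real handler additionally
looks up the RCU-protected entry for the vCPU and bounds nr_entry against
MAX_MSIX_ACC_ENTRIES before using the shadow copy.

#include <stdio.h>

/* Offsets within one 16-byte MSI-X table entry, per the PCI spec. */
#define PCI_MSIX_ENTRY_SIZE                16
#define PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET  12

static const char *classify(unsigned long address, unsigned long len)
{
    /* Reject anything other than an aligned 4-byte access. */
    if ( len != 4 || (address & 3) )
        return "unhandled (bad size/alignment)";

    /* Only the vector control word needs the physical table. */
    if ( (address & (PCI_MSIX_ENTRY_SIZE - 1)) ==
         PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET )
        return "vector control: read the physical table";

    return "address/data: served from the shadow msi_ad[]";
}

int main(void)
{
    unsigned long base = 0xfee00000ul;       /* made-up table GPA */

    printf("%s\n", classify(base + 0, 4));   /* message address low */
    printf("%s\n", classify(base + 12, 4));  /* vector control word  */
    printf("%s\n", classify(base + 2, 4));   /* misaligned access    */
    return 0;
}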