author    Christoph Egger <Christoph.Egger@amd.com>  2011-10-25 16:48:17 +0100
committer Christoph Egger <Christoph.Egger@amd.com>  2011-10-25 16:48:17 +0100
commit    ac97fa6a21ccd395cca43890bbd0bf32e3255ebb
tree      d44577ae12097d61bf9849821a5ced7ec79d56d6
parent    56d1d85a0c580aa20ca7d4b15f19ace899bb5b36
hvm: Clean up I/O emulation
Move the HVM I/O fields into a structure. On MMIO instruction failure,
print out some more bytes.

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
Committed-by: Keir Fraser <keir@xen.org>
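[Editorial note: a rough, self-contained sketch of the refactoring pattern this
patch applies, with simplified stand-in types rather than the actual Xen
definitions. The per-vCPU I/O emulation fields move into one struct, and call
sites cache a single pointer instead of repeating the long
curr->arch.hvm_vcpu.* field path.]

    #include <stdio.h>

    /* Hypothetical, simplified stand-ins for the Xen types. */
    enum hvm_io_state { HVMIO_none, HVMIO_awaiting_completion, HVMIO_completed };

    struct hvm_vcpu_io {
        enum hvm_io_state io_state; /* I/O request in flight to device model */
        unsigned long io_data;      /* data returned by the device model */
        int io_size;                /* width of the access in bytes */
    };

    struct hvm_vcpu { struct hvm_vcpu_io hvm_io; /* ... other fields ... */ };
    struct arch_vcpu { struct hvm_vcpu hvm_vcpu; };
    struct vcpu { struct arch_vcpu arch; };

    static void complete_io(struct vcpu *curr, unsigned long data)
    {
        /* One dereference up front, short accesses afterwards. */
        struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;

        if ( vio->io_state == HVMIO_awaiting_completion )
        {
            vio->io_state = HVMIO_completed;
            vio->io_data = data;
        }
        printf("state=%d data=%#lx size=%d\n",
               vio->io_state, vio->io_data, vio->io_size);
    }

    int main(void)
    {
        struct vcpu v = { .arch.hvm_vcpu.hvm_io =
                          { HVMIO_awaiting_completion, 0, 4 } };
        complete_io(&v, 0xabcd);
        return 0;
    }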
-rw-r--r--  xen/arch/x86/hvm/emulate.c        | 70
-rw-r--r--  xen/arch/x86/hvm/io.c             | 41
-rw-r--r--  xen/arch/x86/hvm/svm/nestedsvm.c  |  2
-rw-r--r--  xen/arch/x86/hvm/vmx/realmode.c   |  7
-rw-r--r--  xen/include/asm-x86/hvm/vcpu.h    | 46
5 files changed, 89 insertions(+), 77 deletions(-)
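[Editorial note: the other half of the change widens the failure diagnostic
from 6 to 10 instruction bytes. A hedged sketch of the resulting output format
follows, with plain printf standing in for Xen's gdprintk and made-up values.]

    #include <stdio.h>

    /* Dump CS:EIP plus the first 10 bytes of the instruction buffer,
     * mirroring the widened format string in io.c below. */
    static void report_mmio_failure(unsigned short cs_sel, unsigned long eip,
                                    const unsigned char *insn_buf)
    {
        printf("MMIO emulation failed @ %04x:%lx: "
               "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
               cs_sel, eip,
               insn_buf[0], insn_buf[1], insn_buf[2], insn_buf[3],
               insn_buf[4], insn_buf[5], insn_buf[6], insn_buf[7],
               insn_buf[8], insn_buf[9]);
    }

    int main(void)
    {
        const unsigned char buf[10] =
            { 0x0f, 0x01, 0xd9, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90 };
        report_mmio_failure(0x0008, 0x1000, buf);
        return 0;
    }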
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 4d225856a2..c9b3214679 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -55,6 +55,7 @@ static int hvmemul_do_io(
paddr_t value = ram_gpa;
int value_is_ptr = (p_data == NULL);
struct vcpu *curr = current;
+ struct hvm_vcpu_io *vio;
ioreq_t *p = get_ioreq(curr);
unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
p2m_type_t p2mt;
@@ -90,43 +91,45 @@ static int hvmemul_do_io(
p_data = NULL;
}
+ vio = &curr->arch.hvm_vcpu.hvm_io;
+
if ( is_mmio && !value_is_ptr )
{
/* Part of a multi-cycle read or write? */
if ( dir == IOREQ_WRITE )
{
- paddr_t pa = curr->arch.hvm_vcpu.mmio_large_write_pa;
- unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_write_bytes;
+ paddr_t pa = vio->mmio_large_write_pa;
+ unsigned int bytes = vio->mmio_large_write_bytes;
if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
return X86EMUL_OKAY;
}
else
{
- paddr_t pa = curr->arch.hvm_vcpu.mmio_large_read_pa;
- unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_read_bytes;
+ paddr_t pa = vio->mmio_large_read_pa;
+ unsigned int bytes = vio->mmio_large_read_bytes;
if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
{
- memcpy(p_data, &curr->arch.hvm_vcpu.mmio_large_read[addr - pa],
+ memcpy(p_data, &vio->mmio_large_read[addr - pa],
size);
return X86EMUL_OKAY;
}
}
}
- switch ( curr->arch.hvm_vcpu.io_state )
+ switch ( vio->io_state )
{
case HVMIO_none:
break;
case HVMIO_completed:
- curr->arch.hvm_vcpu.io_state = HVMIO_none;
+ vio->io_state = HVMIO_none;
if ( p_data == NULL )
return X86EMUL_UNHANDLEABLE;
goto finish_access;
case HVMIO_dispatched:
/* May have to wait for previous cycle of a multi-write to complete. */
if ( is_mmio && !value_is_ptr && (dir == IOREQ_WRITE) &&
- (addr == (curr->arch.hvm_vcpu.mmio_large_write_pa +
- curr->arch.hvm_vcpu.mmio_large_write_bytes)) )
+ (addr == (vio->mmio_large_write_pa +
+ vio->mmio_large_write_bytes)) )
return X86EMUL_RETRY;
default:
return X86EMUL_UNHANDLEABLE;
@@ -139,9 +142,9 @@ static int hvmemul_do_io(
return X86EMUL_UNHANDLEABLE;
}
- curr->arch.hvm_vcpu.io_state =
+ vio->io_state =
(p_data == NULL) ? HVMIO_dispatched : HVMIO_awaiting_completion;
- curr->arch.hvm_vcpu.io_size = size;
+ vio->io_size = size;
p->dir = dir;
p->data_is_ptr = value_is_ptr;
@@ -172,12 +175,12 @@ static int hvmemul_do_io(
*reps = p->count;
p->state = STATE_IORESP_READY;
hvm_io_assist();
- curr->arch.hvm_vcpu.io_state = HVMIO_none;
+ vio->io_state = HVMIO_none;
break;
case X86EMUL_UNHANDLEABLE:
rc = X86EMUL_RETRY;
if ( !hvm_send_assist_req(curr) )
- curr->arch.hvm_vcpu.io_state = HVMIO_none;
+ vio->io_state = HVMIO_none;
else if ( p_data == NULL )
rc = X86EMUL_OKAY;
break;
@@ -190,33 +193,32 @@ static int hvmemul_do_io(
finish_access:
if ( p_data != NULL )
- memcpy(p_data, &curr->arch.hvm_vcpu.io_data, size);
+ memcpy(p_data, &vio->io_data, size);
if ( is_mmio && !value_is_ptr )
{
/* Part of a multi-cycle read or write? */
if ( dir == IOREQ_WRITE )
{
- paddr_t pa = curr->arch.hvm_vcpu.mmio_large_write_pa;
- unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_write_bytes;
+ paddr_t pa = vio->mmio_large_write_pa;
+ unsigned int bytes = vio->mmio_large_write_bytes;
if ( bytes == 0 )
- pa = curr->arch.hvm_vcpu.mmio_large_write_pa = addr;
+ pa = vio->mmio_large_write_pa = addr;
if ( addr == (pa + bytes) )
- curr->arch.hvm_vcpu.mmio_large_write_bytes += size;
+ vio->mmio_large_write_bytes += size;
}
else
{
- paddr_t pa = curr->arch.hvm_vcpu.mmio_large_read_pa;
- unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_read_bytes;
+ paddr_t pa = vio->mmio_large_read_pa;
+ unsigned int bytes = vio->mmio_large_read_bytes;
if ( bytes == 0 )
- pa = curr->arch.hvm_vcpu.mmio_large_read_pa = addr;
+ pa = vio->mmio_large_read_pa = addr;
if ( (addr == (pa + bytes)) &&
((bytes + size) <
- sizeof(curr->arch.hvm_vcpu.mmio_large_read)) )
+ sizeof(vio->mmio_large_read)) )
{
- memcpy(&curr->arch.hvm_vcpu.mmio_large_read[addr - pa],
- p_data, size);
- curr->arch.hvm_vcpu.mmio_large_read_bytes += size;
+ memcpy(&vio->mmio_large_read[addr - pa], p_data, size);
+ vio->mmio_large_read_bytes += size;
}
}
}
@@ -400,6 +402,7 @@ static int __hvmemul_read(
struct vcpu *curr = current;
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
paddr_t gpa;
int rc;
@@ -408,13 +411,12 @@ static int __hvmemul_read(
if ( rc != X86EMUL_OKAY )
return rc;
- if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) &&
- curr->arch.hvm_vcpu.mmio_gva )
+ if ( unlikely(vio->mmio_gva == (addr & PAGE_MASK)) && vio->mmio_gva )
{
unsigned int off = addr & (PAGE_SIZE - 1);
if ( access_type == hvm_access_insn_fetch )
return X86EMUL_UNHANDLEABLE;
- gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
+ gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
if ( (off + bytes) <= PAGE_SIZE )
return hvmemul_do_mmio(gpa, &reps, bytes, 0,
IOREQ_READ, 0, p_data);
@@ -499,6 +501,7 @@ static int hvmemul_write(
struct vcpu *curr = current;
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
paddr_t gpa;
int rc;
@@ -507,11 +510,10 @@ static int hvmemul_write(
if ( rc != X86EMUL_OKAY )
return rc;
- if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) &&
- curr->arch.hvm_vcpu.mmio_gva )
+ if ( unlikely(vio->mmio_gva == (addr & PAGE_MASK)) && vio->mmio_gva )
{
unsigned int off = addr & (PAGE_SIZE - 1);
- gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
+ gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
if ( (off + bytes) <= PAGE_SIZE )
return hvmemul_do_mmio(gpa, &reps, bytes, 0,
IOREQ_WRITE, 0, p_data);
@@ -529,7 +531,7 @@ static int hvmemul_write(
return X86EMUL_EXCEPTION;
case HVMCOPY_unhandleable:
return X86EMUL_UNHANDLEABLE;
- case HVMCOPY_bad_gfn_to_mfn:
+ case HVMCOPY_bad_gfn_to_mfn:
rc = hvmemul_linear_to_phys(
addr, &gpa, bytes, &reps, pfec, hvmemul_ctxt);
if ( rc != X86EMUL_OKAY )
@@ -973,6 +975,7 @@ int hvm_emulate_one(
struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
struct vcpu *curr = current;
uint32_t new_intr_shadow, pfec = PFEC_page_present;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
unsigned long addr;
int rc;
@@ -1010,8 +1013,7 @@ int hvm_emulate_one(
rc = x86_emulate(&hvmemul_ctxt->ctxt, &hvm_emulate_ops);
if ( rc != X86EMUL_RETRY )
- curr->arch.hvm_vcpu.mmio_large_read_bytes =
- curr->arch.hvm_vcpu.mmio_large_write_bytes = 0;
+ vio->mmio_large_read_bytes = vio->mmio_large_write_bytes = 0;
if ( rc != X86EMUL_OKAY )
return rc;
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 1c35d57438..7ebefc7460 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -170,28 +170,31 @@ int handle_mmio(void)
{
struct hvm_emulate_ctxt ctxt;
struct vcpu *curr = current;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
int rc;
hvm_emulate_prepare(&ctxt, guest_cpu_user_regs());
rc = hvm_emulate_one(&ctxt);
- if ( curr->arch.hvm_vcpu.io_state == HVMIO_awaiting_completion )
- curr->arch.hvm_vcpu.io_state = HVMIO_handle_mmio_awaiting_completion;
+ if ( vio->io_state == HVMIO_awaiting_completion )
+ vio->io_state = HVMIO_handle_mmio_awaiting_completion;
else
- curr->arch.hvm_vcpu.mmio_gva = 0;
+ vio->mmio_gva = 0;
switch ( rc )
{
case X86EMUL_UNHANDLEABLE:
gdprintk(XENLOG_WARNING,
"MMIO emulation failed @ %04x:%lx: "
- "%02x %02x %02x %02x %02x %02x\n",
+ "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
hvmemul_get_seg_reg(x86_seg_cs, &ctxt)->sel,
ctxt.insn_buf_eip,
ctxt.insn_buf[0], ctxt.insn_buf[1],
ctxt.insn_buf[2], ctxt.insn_buf[3],
- ctxt.insn_buf[4], ctxt.insn_buf[5]);
+ ctxt.insn_buf[4], ctxt.insn_buf[5],
+ ctxt.insn_buf[6], ctxt.insn_buf[7],
+ ctxt.insn_buf[8], ctxt.insn_buf[9]);
return 0;
case X86EMUL_EXCEPTION:
if ( ctxt.exn_pending )
@@ -208,14 +211,16 @@ int handle_mmio(void)
int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn)
{
- current->arch.hvm_vcpu.mmio_gva = gva & PAGE_MASK;
- current->arch.hvm_vcpu.mmio_gpfn = gpfn;
+ struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ vio->mmio_gva = gva & PAGE_MASK;
+ vio->mmio_gpfn = gpfn;
return handle_mmio();
}
int handle_pio(uint16_t port, int size, int dir)
{
struct vcpu *curr = current;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
unsigned long data, reps = 1;
int rc;
@@ -228,15 +233,14 @@ int handle_pio(uint16_t port, int size, int dir)
{
case X86EMUL_OKAY:
if ( dir == IOREQ_READ )
- memcpy(&guest_cpu_user_regs()->eax,
- &data, curr->arch.hvm_vcpu.io_size);
+ memcpy(&guest_cpu_user_regs()->eax, &data, vio->io_size);
break;
case X86EMUL_RETRY:
- if ( curr->arch.hvm_vcpu.io_state != HVMIO_awaiting_completion )
+ if ( vio->io_state != HVMIO_awaiting_completion )
return 0;
/* Completion in hvm_io_assist() with no re-emulation required. */
ASSERT(dir == IOREQ_READ);
- curr->arch.hvm_vcpu.io_state = HVMIO_handle_pio_awaiting_completion;
+ vio->io_state = HVMIO_handle_pio_awaiting_completion;
break;
default:
gdprintk(XENLOG_ERR, "Weird HVM ioemulation status %d.\n", rc);
@@ -250,6 +254,7 @@ int handle_pio(uint16_t port, int size, int dir)
void hvm_io_assist(void)
{
struct vcpu *curr = current;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
ioreq_t *p = get_ioreq(curr);
enum hvm_io_state io_state;
@@ -257,23 +262,23 @@ void hvm_io_assist(void)
p->state = STATE_IOREQ_NONE;
- io_state = curr->arch.hvm_vcpu.io_state;
- curr->arch.hvm_vcpu.io_state = HVMIO_none;
+ io_state = vio->io_state;
+ vio->io_state = HVMIO_none;
switch ( io_state )
{
case HVMIO_awaiting_completion:
- curr->arch.hvm_vcpu.io_state = HVMIO_completed;
- curr->arch.hvm_vcpu.io_data = p->data;
+ vio->io_state = HVMIO_completed;
+ vio->io_data = p->data;
break;
case HVMIO_handle_mmio_awaiting_completion:
- curr->arch.hvm_vcpu.io_state = HVMIO_completed;
- curr->arch.hvm_vcpu.io_data = p->data;
+ vio->io_state = HVMIO_completed;
+ vio->io_data = p->data;
(void)handle_mmio();
break;
case HVMIO_handle_pio_awaiting_completion:
memcpy(&guest_cpu_user_regs()->eax,
- &p->data, curr->arch.hvm_vcpu.io_size);
+ &p->data, vio->io_size);
break;
default:
break;
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index bc2c50887e..b7cc0f4c97 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -1168,7 +1168,7 @@ enum hvm_intblk nsvm_intr_blocked(struct vcpu *v)
* Delay the injection because this would result in delivering
* an interrupt *within* the execution of an instruction.
*/
- if ( v->arch.hvm_vcpu.io_state != HVMIO_none )
+ if ( v->arch.hvm_vcpu.hvm_io.io_state != HVMIO_none )
return hvm_intblk_shadow;
}
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 5eb0e65c53..ce7a13b08b 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -172,6 +172,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
struct vcpu *curr = current;
struct hvm_emulate_ctxt hvmemul_ctxt;
struct segment_register *sreg;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
unsigned long intr_info;
unsigned int emulations = 0;
@@ -182,7 +183,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
hvm_emulate_prepare(&hvmemul_ctxt, regs);
- if ( curr->arch.hvm_vcpu.io_state == HVMIO_completed )
+ if ( vio->io_state == HVMIO_completed )
realmode_emulate_one(&hvmemul_ctxt);
/* Only deliver interrupts into emulated real mode. */
@@ -196,7 +197,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
curr->arch.hvm_vmx.vmx_emulate = 1;
while ( curr->arch.hvm_vmx.vmx_emulate &&
!softirq_pending(smp_processor_id()) &&
- (curr->arch.hvm_vcpu.io_state == HVMIO_none) )
+ (vio->io_state == HVMIO_none) )
{
/*
* Check for pending interrupts only every 16 instructions, because
@@ -221,7 +222,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
}
/* Need to emulate next time if we've started an IO operation */
- if ( curr->arch.hvm_vcpu.io_state != HVMIO_none )
+ if ( vio->io_state != HVMIO_none )
curr->arch.hvm_vmx.vmx_emulate = 1;
if ( !curr->arch.hvm_vmx.vmx_emulate && !curr->arch.hvm_vmx.vmx_realmode )
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index d899c9a713..537da96f5e 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -44,6 +44,30 @@ struct hvm_vcpu_asid {
uint32_t asid;
};
+struct hvm_vcpu_io {
+ /* I/O request in flight to device model. */
+ enum hvm_io_state io_state;
+ unsigned long io_data;
+ int io_size;
+
+ /*
+ * HVM emulation:
+ * Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
+ * The latter is known to be an MMIO frame (not RAM).
+ * This translation is only valid if @mmio_gva is non-zero.
+ */
+ unsigned long mmio_gva;
+ unsigned long mmio_gpfn;
+
+ /* We may read up to m128 as a number of device-model transactions. */
+ paddr_t mmio_large_read_pa;
+ uint8_t mmio_large_read[16];
+ unsigned int mmio_large_read_bytes;
+ /* We may write up to m128 as a number of device-model transactions. */
+ paddr_t mmio_large_write_pa;
+ unsigned int mmio_large_write_bytes;
+};
+
#define VMCX_EADDR (~0ULL)
struct nestedvcpu {
@@ -135,31 +159,11 @@ struct hvm_vcpu {
/* Which cache mode is this VCPU in (CR0:CD/NW)? */
u8 cache_mode;
- /* I/O request in flight to device model. */
- enum hvm_io_state io_state;
- unsigned long io_data;
- int io_size;
-
- /*
- * HVM emulation:
- * Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
- * The latter is known to be an MMIO frame (not RAM).
- * This translation is only valid if @mmio_gva is non-zero.
- */
- unsigned long mmio_gva;
- unsigned long mmio_gpfn;
+ struct hvm_vcpu_io hvm_io;
/* Callback into x86_emulate when emulating FPU/MMX/XMM instructions. */
void (*fpu_exception_callback)(void *, struct cpu_user_regs *);
void *fpu_exception_callback_arg;
- /* We may read up to m128 as a number of device-model transactions. */
- paddr_t mmio_large_read_pa;
- uint8_t mmio_large_read[16];
- unsigned int mmio_large_read_bytes;
- /* We may write up to m128 as a number of device-model transactions. */
- paddr_t mmio_large_write_pa;
- unsigned int mmio_large_write_bytes;
-
/* Pending hw/sw interrupt */
int inject_trap; /* -1 for nothing to inject */
int inject_error_code;