path: root/xen/arch/x86/hvm/emulate.c
author    Jan Beulich <jbeulich@suse.com>    2013-10-14 09:54:09 +0200
committer Jan Beulich <jbeulich@suse.com>    2013-10-14 09:54:09 +0200
commit    699be3096ce62e2d6ef953b2a223c34330598ac8 (patch)
tree      63a39efbd838441f7f4be3684cdab67f9a2ddcb4 /xen/arch/x86/hvm/emulate.c
parent    63b02eff535b8798517b5a280cc2fff2f7d415c1 (diff)
download  xen-699be3096ce62e2d6ef953b2a223c34330598ac8.tar.gz
          xen-699be3096ce62e2d6ef953b2a223c34330598ac8.tar.bz2
          xen-699be3096ce62e2d6ef953b2a223c34330598ac8.zip
x86/HVM: cache emulated instruction for retry processing
Rather than re-reading the instruction bytes upon retry processing, stash away and re-use what we already read. That way we can be certain that the retry won't do something different from what requested the retry, getting once again closer to real hardware behavior (where what we use retries for is simply a bus operation, not involving redundant decoding of instructions).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Keir Fraser <keir@xen.org>
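For readers unfamiliar with the retry path, the shape of the change is roughly the following. This is a minimal, self-contained C sketch with made-up names (insn_cache, begin_emulation, end_emulation, stashed); it is not Xen's actual interface, which lives in hvm_emulate_one() and hvmemul_insn_fetch() in the diff below.

/* Sketch only: hypothetical types and functions, not Xen code. */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define INSN_BUF_SIZE 16

struct insn_cache {
    uint8_t buf[INSN_BUF_SIZE];   /* instruction bytes seen by the decoder */
    unsigned int bytes;           /* number of valid bytes in buf */
};

/* Stashed copy that survives across a retry (per-vCPU I/O state in Xen). */
static struct insn_cache stashed;

static void begin_emulation(struct insn_cache *ctxt)
{
    if ( stashed.bytes )
    {
        /* Retry: feed the decoder exactly the bytes it saw the first time. */
        memcpy(ctxt->buf, stashed.buf, stashed.bytes);
        ctxt->bytes = stashed.bytes;
    }
    else
        ctxt->bytes = 0;          /* normal path: fetch from the guest */
}

static void end_emulation(const struct insn_cache *ctxt, bool will_retry)
{
    if ( will_retry )
    {
        /* Keep the decoded bytes around for the retry. */
        stashed.bytes = ctxt->bytes;
        memcpy(stashed.buf, ctxt->buf, ctxt->bytes);
    }
    else
        stashed.bytes = 0;        /* instruction completed: drop the cache */
}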
Diffstat (limited to 'xen/arch/x86/hvm/emulate.c')
-rw-r--r--  xen/arch/x86/hvm/emulate.c  |  57
1 file changed, 43 insertions(+), 14 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 6cb6678175..f39c17358e 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -569,9 +569,19 @@ static int hvmemul_insn_fetch(
/* Fall back if requested bytes are not in the prefetch cache. */
if ( unlikely((insn_off + bytes) > hvmemul_ctxt->insn_buf_bytes) )
- return __hvmemul_read(
- seg, offset, p_data, bytes,
- hvm_access_insn_fetch, hvmemul_ctxt);
+ {
+ int rc = __hvmemul_read(seg, offset, p_data, bytes,
+ hvm_access_insn_fetch, hvmemul_ctxt);
+
+ if ( rc == X86EMUL_OKAY )
+ {
+ ASSERT(insn_off + bytes <= sizeof(hvmemul_ctxt->insn_buf));
+ memcpy(&hvmemul_ctxt->insn_buf[insn_off], p_data, bytes);
+ hvmemul_ctxt->insn_buf_bytes = insn_off + bytes;
+ }
+
+ return rc;
+ }
/* Hit the cache. Simple memcpy. */
memcpy(p_data, &hvmemul_ctxt->insn_buf[insn_off], bytes);
@@ -1148,17 +1158,27 @@ int hvm_emulate_one(
pfec |= PFEC_user_mode;
hvmemul_ctxt->insn_buf_eip = regs->eip;
- hvmemul_ctxt->insn_buf_bytes =
- hvm_get_insn_bytes(curr, hvmemul_ctxt->insn_buf)
- ? :
- (hvm_virtual_to_linear_addr(
- x86_seg_cs, &hvmemul_ctxt->seg_reg[x86_seg_cs],
- regs->eip, sizeof(hvmemul_ctxt->insn_buf),
- hvm_access_insn_fetch, hvmemul_ctxt->ctxt.addr_size, &addr) &&
- !hvm_fetch_from_guest_virt_nofault(
- hvmemul_ctxt->insn_buf, addr,
- sizeof(hvmemul_ctxt->insn_buf), pfec))
- ? sizeof(hvmemul_ctxt->insn_buf) : 0;
+ if ( !vio->mmio_insn_bytes )
+ {
+ hvmemul_ctxt->insn_buf_bytes =
+ hvm_get_insn_bytes(curr, hvmemul_ctxt->insn_buf) ?:
+ (hvm_virtual_to_linear_addr(x86_seg_cs,
+ &hvmemul_ctxt->seg_reg[x86_seg_cs],
+ regs->eip,
+ sizeof(hvmemul_ctxt->insn_buf),
+ hvm_access_insn_fetch,
+ hvmemul_ctxt->ctxt.addr_size,
+ &addr) &&
+ hvm_fetch_from_guest_virt_nofault(hvmemul_ctxt->insn_buf, addr,
+ sizeof(hvmemul_ctxt->insn_buf),
+ pfec) == HVMCOPY_okay) ?
+ sizeof(hvmemul_ctxt->insn_buf) : 0;
+ }
+ else
+ {
+ hvmemul_ctxt->insn_buf_bytes = vio->mmio_insn_bytes;
+ memcpy(hvmemul_ctxt->insn_buf, vio->mmio_insn, vio->mmio_insn_bytes);
+ }
hvmemul_ctxt->exn_pending = 0;
vio->mmio_retrying = vio->mmio_retry;
@@ -1169,7 +1189,16 @@ int hvm_emulate_one(
if ( rc == X86EMUL_OKAY && vio->mmio_retry )
rc = X86EMUL_RETRY;
if ( rc != X86EMUL_RETRY )
+ {
vio->mmio_large_read_bytes = vio->mmio_large_write_bytes = 0;
+ vio->mmio_insn_bytes = 0;
+ }
+ else
+ {
+ BUILD_BUG_ON(sizeof(vio->mmio_insn) < sizeof(hvmemul_ctxt->insn_buf));
+ vio->mmio_insn_bytes = hvmemul_ctxt->insn_buf_bytes;
+ memcpy(vio->mmio_insn, hvmemul_ctxt->insn_buf, vio->mmio_insn_bytes);
+ }
if ( rc != X86EMUL_OKAY )
return rc;
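To make the fetch-side half of the change concrete, here is a small self-contained sketch of the same pattern: a cache miss reads from the guest and folds the bytes back into the cache, so a subsequent attempt sees identical bytes instead of re-reading guest memory. All names here (fetch_ctxt, read_guest, insn_fetch) are invented for illustration; the real functions are __hvmemul_read() and hvmemul_insn_fetch() in the first hunk above.

/* Sketch only: hypothetical types and functions, not Xen code. */
#include <stdint.h>
#include <string.h>

#define INSN_BUF_SIZE 16

struct fetch_ctxt {
    uint8_t buf[INSN_BUF_SIZE];   /* prefetch / retry cache */
    unsigned int buf_bytes;       /* valid bytes in buf */
};

/* Stand-in for __hvmemul_read(): reads instruction bytes from a fake
 * guest code page so the sketch is self-contained. */
static int read_guest(void *dst, unsigned int off, unsigned int bytes)
{
    static const uint8_t code[INSN_BUF_SIZE] = { 0x0f, 0x01, 0xc1 }; /* vmcall */

    if ( off + bytes > sizeof(code) )
        return -1;
    memcpy(dst, &code[off], bytes);
    return 0;                     /* X86EMUL_OKAY in the real code */
}

/* Mirrors the first hunk: on a cache miss, fetch from the guest and fold
 * the result back into the cache so a later retry decodes identical bytes. */
static int insn_fetch(struct fetch_ctxt *ctxt, unsigned int insn_off,
                      void *p_data, unsigned int bytes)
{
    if ( insn_off + bytes > ctxt->buf_bytes )
    {
        int rc = read_guest(p_data, insn_off, bytes);

        if ( rc == 0 )
        {
            memcpy(&ctxt->buf[insn_off], p_data, bytes);
            ctxt->buf_bytes = insn_off + bytes;
        }
        return rc;
    }

    /* Cache hit: simple memcpy, no further guest access. */
    memcpy(p_data, &ctxt->buf[insn_off], bytes);
    return 0;
}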