diff options
author | Keir Fraser <keir.fraser@citrix.com> | 2008-03-27 10:52:54 +0000 |
---|---|---|
committer | Keir Fraser <keir.fraser@citrix.com> | 2008-03-27 10:52:54 +0000 |
commit | 8c8df59519d782fc31b41e7d1ec276c105e4eb4a (patch) | |
tree | ea91fd4236aa91a1c1f1d74ebf6a76c8ebf567d9 /xen/arch/x86/hvm | |
parent | 54e034136e4c4655f728edb332fb35195f1a3f13 (diff) | |
download | xen-8c8df59519d782fc31b41e7d1ec276c105e4eb4a.tar.gz xen-8c8df59519d782fc31b41e7d1ec276c105e4eb4a.tar.bz2 xen-8c8df59519d782fc31b41e7d1ec276c105e4eb4a.zip |
x86_emulate: On HVM MMIO emulation, cache the gva->pfn mapping for the
MMIO page. Speeds up Windows installation by about 20 percent.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Diffstat (limited to 'xen/arch/x86/hvm')
-rw-r--r-- | xen/arch/x86/hvm/emulate.c | 27 | ||||
-rw-r--r-- | xen/arch/x86/hvm/io.c | 11 |
2 files changed, 35 insertions(+), 3 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 4b0a38e6e9..4ab2cd0301 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -214,7 +214,9 @@ static int __hvmemul_read(
     enum hvm_access_type access_type,
     struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
+    struct vcpu *curr = current;
     unsigned long addr;
+    paddr_t gpa;
     int rc;
 
     rc = hvmemul_virtual_to_linear(
@@ -224,6 +226,17 @@ static int __hvmemul_read(
 
     *val = 0;
 
+    if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) &&
+         curr->arch.hvm_vcpu.mmio_gva )
+    {
+        unsigned int off = addr & (PAGE_SIZE - 1);
+        if ( access_type == hvm_access_insn_fetch )
+            return X86EMUL_UNHANDLEABLE;
+        gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
+        if ( (off + bytes) <= PAGE_SIZE )
+            return hvmemul_do_mmio(gpa, 1, bytes, 0, IOREQ_READ, 0, 0, val);
+    }
+
     rc = ((access_type == hvm_access_insn_fetch) ?
           hvm_fetch_from_guest_virt(val, addr, bytes) :
           hvm_copy_from_guest_virt(val, addr, bytes));
@@ -233,7 +246,6 @@ static int __hvmemul_read(
     if ( rc == HVMCOPY_bad_gfn_to_mfn )
     {
         unsigned long reps = 1;
-        paddr_t gpa;
 
         if ( access_type == hvm_access_insn_fetch )
             return X86EMUL_UNHANDLEABLE;
@@ -293,7 +305,9 @@ static int hvmemul_write(
 {
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+    struct vcpu *curr = current;
     unsigned long addr;
+    paddr_t gpa;
     int rc;
 
     rc = hvmemul_virtual_to_linear(
@@ -301,6 +315,16 @@ static int hvmemul_write(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
+    if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) &&
+         curr->arch.hvm_vcpu.mmio_gva )
+    {
+        unsigned int off = addr & (PAGE_SIZE - 1);
+        gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
+        if ( (off + bytes) <= PAGE_SIZE )
+            return hvmemul_do_mmio(gpa, 1, bytes, val, IOREQ_WRITE,
+                                   0, 0, NULL);
+    }
+
     rc = hvm_copy_to_guest_virt(addr, &val, bytes);
     if ( rc == HVMCOPY_bad_gva_to_gfn )
         return X86EMUL_EXCEPTION;
@@ -308,7 +332,6 @@ static int hvmemul_write(
     if ( rc == HVMCOPY_bad_gfn_to_mfn )
     {
         unsigned long reps = 1;
-        paddr_t gpa;
 
         rc = hvmemul_linear_to_phys(
             addr, &gpa, bytes, &reps, hvm_access_write, hvmemul_ctxt);
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 041d237888..bc5f8acce3 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -183,7 +183,9 @@ int handle_mmio(void)
     rc = hvm_emulate_one(&ctxt);
 
     if ( curr->arch.hvm_vcpu.io_state == HVMIO_awaiting_completion )
-        curr->arch.hvm_vcpu.io_state = HVMIO_handle_mmio_awaiting_completion;
+        curr->arch.hvm_vcpu.io_state = HVMIO_handle_mmio_awaiting_completion;
+    else
+        curr->arch.hvm_vcpu.mmio_gva = 0;
 
     switch ( rc )
     {
@@ -210,6 +212,13 @@ int handle_mmio(void)
     return 1;
 }
 
+int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn)
+{
+    current->arch.hvm_vcpu.mmio_gva = gva & PAGE_MASK;
+    current->arch.hvm_vcpu.mmio_gpfn = gpfn;
+    return handle_mmio();
+}
+
 void hvm_io_assist(void)
 {
     struct vcpu *v = current;