about | summary | refs | log | tree | commit | diff | stats
path: root/xen
diff options
context:
space:
mode:
author: Keir Fraser <keir.fraser@citrix.com> 2008-03-27 10:52:54 +0000
committer: Keir Fraser <keir.fraser@citrix.com> 2008-03-27 10:52:54 +0000
commit 8c8df59519d782fc31b41e7d1ec276c105e4eb4a (patch)
tree ea91fd4236aa91a1c1f1d74ebf6a76c8ebf567d9 /xen
parent 54e034136e4c4655f728edb332fb35195f1a3f13 (diff)
downloadxen-8c8df59519d782fc31b41e7d1ec276c105e4eb4a.tar.gz
xen-8c8df59519d782fc31b41e7d1ec276c105e4eb4a.tar.bz2
xen-8c8df59519d782fc31b41e7d1ec276c105e4eb4a.zip
x86_emulate: On HVM MMIO emulation, cache the gva->pfn mapping for the
MMIO page. Speeds up Windows installation by about 20 percent.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Diffstat (limited to 'xen')
-rw-r--r--  xen/arch/x86/hvm/emulate.c        | 27
-rw-r--r--  xen/arch/x86/hvm/io.c             | 11
-rw-r--r--  xen/arch/x86/mm/shadow/multi.c    |  6
-rw-r--r--  xen/include/asm-x86/hvm/io.h      |  1
-rw-r--r--  xen/include/asm-x86/hvm/vcpu.h    |  9
5 files changed, 49 insertions, 5 deletions
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 4b0a38e6e9..4ab2cd0301 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -214,7 +214,9 @@ static int __hvmemul_read(
enum hvm_access_type access_type,
struct hvm_emulate_ctxt *hvmemul_ctxt)
{
+ struct vcpu *curr = current;
unsigned long addr;
+ paddr_t gpa;
int rc;
rc = hvmemul_virtual_to_linear(
@@ -224,6 +226,17 @@ static int __hvmemul_read(
*val = 0;
+ if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) &&
+ curr->arch.hvm_vcpu.mmio_gva )
+ {
+ unsigned int off = addr & (PAGE_SIZE - 1);
+ if ( access_type == hvm_access_insn_fetch )
+ return X86EMUL_UNHANDLEABLE;
+ gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
+ if ( (off + bytes) <= PAGE_SIZE )
+ return hvmemul_do_mmio(gpa, 1, bytes, 0, IOREQ_READ, 0, 0, val);
+ }
+
rc = ((access_type == hvm_access_insn_fetch) ?
hvm_fetch_from_guest_virt(val, addr, bytes) :
hvm_copy_from_guest_virt(val, addr, bytes));
@@ -233,7 +246,6 @@ static int __hvmemul_read(
if ( rc == HVMCOPY_bad_gfn_to_mfn )
{
unsigned long reps = 1;
- paddr_t gpa;
if ( access_type == hvm_access_insn_fetch )
return X86EMUL_UNHANDLEABLE;
@@ -293,7 +305,9 @@ static int hvmemul_write(
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+ struct vcpu *curr = current;
unsigned long addr;
+ paddr_t gpa;
int rc;
rc = hvmemul_virtual_to_linear(
@@ -301,6 +315,16 @@ static int hvmemul_write(
if ( rc != X86EMUL_OKAY )
return rc;
+ if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) &&
+ curr->arch.hvm_vcpu.mmio_gva )
+ {
+ unsigned int off = addr & (PAGE_SIZE - 1);
+ gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
+ if ( (off + bytes) <= PAGE_SIZE )
+ return hvmemul_do_mmio(gpa, 1, bytes, val, IOREQ_WRITE,
+ 0, 0, NULL);
+ }
+
rc = hvm_copy_to_guest_virt(addr, &val, bytes);
if ( rc == HVMCOPY_bad_gva_to_gfn )
return X86EMUL_EXCEPTION;
@@ -308,7 +332,6 @@ static int hvmemul_write(
if ( rc == HVMCOPY_bad_gfn_to_mfn )
{
unsigned long reps = 1;
- paddr_t gpa;
rc = hvmemul_linear_to_phys(
addr, &gpa, bytes, &reps, hvm_access_write, hvmemul_ctxt);
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 041d237888..bc5f8acce3 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -183,7 +183,9 @@ int handle_mmio(void)
rc = hvm_emulate_one(&ctxt);
if ( curr->arch.hvm_vcpu.io_state == HVMIO_awaiting_completion )
- curr->arch.hvm_vcpu.io_state = HVMIO_handle_mmio_awaiting_completion;
+ curr->arch.hvm_vcpu.io_state = HVMIO_handle_mmio_awaiting_completion;
+ else
+ curr->arch.hvm_vcpu.mmio_gva = 0;
switch ( rc )
{
@@ -210,6 +212,13 @@ int handle_mmio(void)
return 1;
}
+int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn)
+{
+ current->arch.hvm_vcpu.mmio_gva = gva & PAGE_MASK;
+ current->arch.hvm_vcpu.mmio_gpfn = gpfn;
+ return handle_mmio();
+}
+
void hvm_io_assist(void)
{
struct vcpu *v = current;
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 4689d132c1..f92687c246 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2881,7 +2881,8 @@ static int sh_page_fault(struct vcpu *v,
perfc_incr(shadow_fault_fast_mmio);
SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa);
reset_early_unshadow(v);
- return handle_mmio() ? EXCRET_fault_fixed : 0;
+ return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT)
+ ? EXCRET_fault_fixed : 0);
}
else
{
@@ -3199,7 +3200,8 @@ static int sh_page_fault(struct vcpu *v,
shadow_audit_tables(v);
reset_early_unshadow(v);
shadow_unlock(d);
- return handle_mmio() ? EXCRET_fault_fixed : 0;
+ return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT)
+ ? EXCRET_fault_fixed : 0);
not_a_shadow_fault:
sh_audit_gw(v, &gw);
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index 8fd631d8cf..535f909d5a 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -99,6 +99,7 @@ static inline int register_buffered_io_handler(
void send_timeoffset_req(unsigned long timeoff);
void send_invalidate_req(void);
int handle_mmio(void);
+int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn);
void hvm_interrupt_post(struct vcpu *v, int vector, int type);
void hvm_io_assist(void);
void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 0c76551575..d3281d20dc 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -80,6 +80,15 @@ struct hvm_vcpu {
/* I/O request in flight to device model. */
enum hvm_io_state io_state;
unsigned long io_data;
+
+ /*
+ * HVM emulation:
+ * Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
+ * The latter is known to be an MMIO frame (not RAM).
+ * This translation is only valid if @mmio_gva is non-zero.
+ */
+ unsigned long mmio_gva;
+ unsigned long mmio_gpfn;
};
#endif /* __ASM_X86_HVM_VCPU_H__ */