author     Tim Deegan <tim@xen.org>          2013-05-09 13:06:53 +0100
committer  Tim Deegan <tim@xen.org>          2013-05-16 12:05:25 +0100
commit     5d43891bf4002b754cd90d83e91d9190e8c8b9d0 (patch)
tree       339573766ac293e781251fd45ef5f869deecce94
parent     71ba77fcf27149a6c75d7d4720f6566f7034ee55 (diff)
x86/hvm: avoid p2m lookups for vlapic accesses.
The LAPIC base address is a known GFN, so we can skip looking up the p2m:
we know it should be handled as emulated MMIO.  That helps performance in
older Windows OSes, which make a _lot_ of TPR accesses.

This will change the behaviour of any OS that maps other memory/devices
at its LAPIC address; the new behaviour (the LAPIC mapping always wins)
is closer to actual hardware behaviour.

Signed-off-by: Tim Deegan <tim@xen.org>
Acked-by: Jan Beulich <jbeulich@suse.com>
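The core of the change is a single GFN comparison, visible in both hunks
below.  The following is a minimal, standalone C sketch of that check, not
the patch itself: PFN_DOWN and LAPIC_BASE_ADDRESS here are local stand-ins
for Xen's definitions (the real code reads the per-vCPU APIC base via
vlapic_base_address(vcpu_vlapic(v))), and 0xfee00000 is simply the x86
default APIC base.

    #include <stdio.h>

    /* Local stand-ins for Xen's macros, for illustration only. */
    #define PAGE_SHIFT          12
    #define PFN_DOWN(addr)      ((addr) >> PAGE_SHIFT)
    #define LAPIC_BASE_ADDRESS  0xfee00000UL   /* x86 default APIC base */

    /* Mimics the patch's fast-path test: is this faulting GFN the LAPIC
     * page?  If so, it can go straight to MMIO emulation without a p2m
     * lookup. */
    static int is_vlapic_gfn(unsigned long gfn)
    {
        return gfn == PFN_DOWN(LAPIC_BASE_ADDRESS);
    }

    int main(void)
    {
        unsigned long tpr_write  = 0xfee00080UL;  /* TPR is at base + 0x80 */
        unsigned long other_mmio = 0xfebf0000UL;  /* some unrelated MMIO hole */

        printf("TPR access   -> fast path: %d\n",
               is_vlapic_gfn(PFN_DOWN(tpr_write)));
        printf("other access -> fast path: %d\n",
               is_vlapic_gfn(PFN_DOWN(other_mmio)));
        return 0;
    }

Comparing whole frames is enough: the TPR sits at offset 0x80 within the
LAPIC page, so a single 4KiB-frame compare covers every LAPIC register.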
-rw-r--r--   xen/arch/x86/hvm/hvm.c   17
1 file changed, 17 insertions, 0 deletions
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7c3cb15053..bcf96093d4 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1361,6 +1361,17 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
         }
     }
 
+    /* For the benefit of 32-bit WinXP (& older Windows) on AMD CPUs,
+     * a fast path for LAPIC accesses, skipping the p2m lookup. */
+    if ( !nestedhvm_vcpu_in_guestmode(v)
+         && gfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(v))) )
+    {
+        if ( !handle_mmio() )
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+        rc = 1;
+        goto out;
+    }
+
     p2m = p2m_get_hostp2m(v->domain);
     mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma,
                               P2M_ALLOC | (access_w ? P2M_UNSHARE : 0), NULL);
@@ -2471,6 +2482,12 @@ static enum hvm_copy_result __hvm_copy(
             gfn = addr >> PAGE_SHIFT;
         }
 
+        /* For the benefit of 32-bit WinXP (& older Windows) on AMD CPUs,
+         * a fast path for LAPIC accesses, skipping the p2m lookup. */
+        if ( !nestedhvm_vcpu_in_guestmode(curr)
+             && gfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(curr))) )
+            return HVMCOPY_bad_gfn_to_mfn;
+
         page = get_page_from_gfn(curr->domain, gfn, &p2mt, P2M_UNSHARE);
         if ( p2m_is_paging(p2mt) )