From 5d43891bf4002b754cd90d83e91d9190e8c8b9d0 Mon Sep 17 00:00:00 2001
From: Tim Deegan
Date: Thu, 9 May 2013 13:06:53 +0100
Subject: x86/hvm: avoid p2m lookups for vlapic accesses.

The LAPIC base address is a known GFN, so we can skip looking up the
p2m: we know it should be handled as emulated MMIO.  That helps
performance in older Windows OSes, which make a _lot_ of TPR accesses.

This will change the behaviour of any OS that maps other memory/devices
at its LAPIC address; the new behaviour (the LAPIC mapping always wins)
is closer to actual hardware behaviour.

Signed-off-by: Tim Deegan
Acked-by: Jan Beulich
---
 xen/arch/x86/hvm/hvm.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7c3cb15053..bcf96093d4 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1361,6 +1361,17 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
         }
     }
 
+    /* For the benefit of 32-bit WinXP (& older Windows) on AMD CPUs,
+     * a fast path for LAPIC accesses, skipping the p2m lookup. */
+    if ( !nestedhvm_vcpu_in_guestmode(v)
+         && gfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(v))) )
+    {
+        if ( !handle_mmio() )
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+        rc = 1;
+        goto out;
+    }
+
     p2m = p2m_get_hostp2m(v->domain);
     mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma,
                               P2M_ALLOC | (access_w ? P2M_UNSHARE : 0), NULL);
@@ -2471,6 +2482,12 @@ static enum hvm_copy_result __hvm_copy(
             gfn = addr >> PAGE_SHIFT;
         }
 
+        /* For the benefit of 32-bit WinXP (& older Windows) on AMD CPUs,
+         * a fast path for LAPIC accesses, skipping the p2m lookup. */
+        if ( !nestedhvm_vcpu_in_guestmode(curr)
+             && gfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(curr))) )
+            return HVMCOPY_bad_gfn_to_mfn;
+
         page = get_page_from_gfn(curr->domain, gfn, &p2mt, P2M_UNSHARE);
 
         if ( p2m_is_paging(p2mt) )
-- 
cgit v1.2.3