author     Keir Fraser <keir@xen.org>  2011-04-15 10:07:42 +0100
committer  Keir Fraser <keir@xen.org>  2011-04-15 10:07:42 +0100
commit     3c9c26645ddd820084e2da86366a54990c363b54 (patch)
tree       5b1353a72ecafe588ef7a92a3935180df4f1fc12
parent     8d24303023ec82d94f97154785302d52e9917f91 (diff)
nestedhvm: Allocate a separate host ASID for each L2 VCPU.
This avoids TLB flushing on every L1/L2 transition.

Signed-off-by: Keir Fraser <keir@xen.org>
Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
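The point of the change: each vCPU now carries two independent (generation, ASID) pairs, n1asid for the L1 context and nv_n2asid for the L2 context, and the VM-entry path hands whichever is active to hvm_asid_handle_vmenter(). Because both pairs stay valid across an L1/L2 switch, the transition no longer forces a TLB flush; previously the single per-vCPU ASID was flushed on every switch (the hvm_asid_flush_vcpu() calls removed from nestedsvm.c below). The following self-contained C sketch models the generation-based allocator in user space; the pool size, starting values, and main() driver are illustrative assumptions, not Xen code.

/* User-space model of the generation-based ASID scheme this patch builds on;
 * a simplified sketch, not the Xen implementation. */
#include <stdint.h>
#include <stdio.h>

struct hvm_vcpu_asid {
    uint64_t generation;   /* valid only while it matches the core's generation */
    uint32_t asid;         /* host ASID to program into the VMCB/VMCS */
};

static uint64_t core_asid_generation = 1;
static uint32_t next_asid = 1;
static const uint32_t max_asid = 8;    /* assumed tiny pool for the demo */

/* Returns 1 when the caller must flush the TLB (a new generation began). */
static int asid_handle_vmenter(struct hvm_vcpu_asid *asid)
{
    if ( asid->generation == core_asid_generation )
        return 0;                      /* still valid: reuse without flushing */

    if ( next_asid > max_asid )        /* pool exhausted: start a new generation */
    {
        core_asid_generation++;
        next_asid = 1;
    }

    asid->asid = next_asid++;
    asid->generation = core_asid_generation;

    /* ASID 1 opens a generation; all older allocations are now stale. */
    return (asid->asid == 1);
}

int main(void)
{
    /* One pair per context, as the patch introduces: n1asid for L1,
     * nv_n2asid for L2.  Note that no flush occurs on the L2->L1 switch. */
    struct hvm_vcpu_asid n1asid = { 0, 0 }, nv_n2asid = { 0, 0 };

    printf("enter L1:   flush=%d asid=%u\n", asid_handle_vmenter(&n1asid), n1asid.asid);
    printf("enter L2:   flush=%d asid=%u\n", asid_handle_vmenter(&nv_n2asid), nv_n2asid.asid);
    printf("back to L1: flush=%d asid=%u\n", asid_handle_vmenter(&n1asid), n1asid.asid);
    return 0;
}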
-rw-r--r--  xen/arch/x86/hvm/asid.c            21
-rw-r--r--  xen/arch/x86/hvm/svm/asid.c        10
-rw-r--r--  xen/arch/x86/hvm/svm/nestedsvm.c   16
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c         14
-rw-r--r--  xen/arch/x86/hvm/vmx/vmcs.c         3
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c          8
-rw-r--r--  xen/include/asm-x86/hvm/asid.h      8
-rw-r--r--  xen/include/asm-x86/hvm/vcpu.h     10
-rw-r--r--  xen/include/asm-x86/hvm/vmx/vmx.h   2
9 files changed, 61 insertions, 31 deletions
diff --git a/xen/arch/x86/hvm/asid.c b/xen/arch/x86/hvm/asid.c
index 1cccaf53d5..bfbf0d174e 100644
--- a/xen/arch/x86/hvm/asid.c
+++ b/xen/arch/x86/hvm/asid.c
@@ -78,9 +78,15 @@ void hvm_asid_init(int nasids)
     data->next_asid = 1;
 }
 
+void hvm_asid_flush_vcpu_asid(struct hvm_vcpu_asid *asid)
+{
+    asid->generation = 0;
+}
+
 void hvm_asid_flush_vcpu(struct vcpu *v)
 {
-    v->arch.hvm_vcpu.asid_generation = 0;
+    hvm_asid_flush_vcpu_asid(&v->arch.hvm_vcpu.n1asid);
+    hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
 }
 
 void hvm_asid_flush_core(void)
@@ -102,9 +108,8 @@ void hvm_asid_flush_core(void)
     data->disabled = 1;
 }
 
-bool_t hvm_asid_handle_vmenter(void)
+bool_t hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid)
 {
-    struct vcpu *curr = current;
     struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
 
     /* On erratum #170 systems we must flush the TLB.
@@ -113,7 +118,7 @@ bool_t hvm_asid_handle_vmenter(void)
         goto disabled;
 
     /* Test if VCPU has valid ASID. */
-    if ( curr->arch.hvm_vcpu.asid_generation == data->core_asid_generation )
+    if ( asid->generation == data->core_asid_generation )
         return 0;
 
     /* If there are no free ASIDs, need to go to a new generation */
@@ -126,17 +131,17 @@ bool_t hvm_asid_handle_vmenter(void)
     }
 
     /* Now guaranteed to be a free ASID. */
-    curr->arch.hvm_vcpu.asid = data->next_asid++;
-    curr->arch.hvm_vcpu.asid_generation = data->core_asid_generation;
+    asid->asid = data->next_asid++;
+    asid->generation = data->core_asid_generation;
 
     /*
      * When we assign ASID 1, flush all TLB entries as we are starting a new
      * generation, and all old ASID allocations are now stale.
      */
-    return (curr->arch.hvm_vcpu.asid == 1);
+    return (asid->asid == 1);
 
  disabled:
-    curr->arch.hvm_vcpu.asid = 0;
+    asid->asid = 0;
     return 0;
 }
diff --git a/xen/arch/x86/hvm/svm/asid.c b/xen/arch/x86/hvm/svm/asid.c
index 1723866f11..ede2be6cc5 100644
--- a/xen/arch/x86/hvm/svm/asid.c
+++ b/xen/arch/x86/hvm/svm/asid.c
@@ -22,6 +22,7 @@
 #include <xen/perfc.h>
 #include <asm/hvm/svm/asid.h>
 #include <asm/amd.h>
+#include <asm/hvm/nestedhvm.h>
 
 void svm_asid_init(struct cpuinfo_x86 *c)
 {
@@ -42,17 +43,20 @@ asmlinkage void svm_asid_handle_vmrun(void)
 {
     struct vcpu *curr = current;
     struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
-    bool_t need_flush = hvm_asid_handle_vmenter();
+    struct hvm_vcpu_asid *p_asid =
+        nestedhvm_vcpu_in_guestmode(curr)
+        ? &vcpu_nestedhvm(curr).nv_n2asid : &curr->arch.hvm_vcpu.n1asid;
+    bool_t need_flush = hvm_asid_handle_vmenter(p_asid);
 
     /* ASID 0 indicates that ASIDs are disabled. */
-    if ( curr->arch.hvm_vcpu.asid == 0 )
+    if ( p_asid->asid == 0 )
     {
         vmcb_set_guest_asid(vmcb, 1);
         vmcb->tlb_control = 1;
         return;
     }
 
-    vmcb_set_guest_asid(vmcb, curr->arch.hvm_vcpu.asid);
+    vmcb_set_guest_asid(vmcb, p_asid->asid);
     vmcb->tlb_control = need_flush;
 }
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 425112a37e..fe03ab2f2c 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -261,8 +261,6 @@ int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
     /* Cleanbits */
     n1vmcb->cleanbits.bytes = 0;
 
-    hvm_asid_flush_vcpu(v);
-
     return 0;
 }
 
@@ -408,9 +406,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
     if (rc)
         return rc;
 
-    /* ASID */
-    hvm_asid_flush_vcpu(v);
-    /* n2vmcb->_guest_asid = ns_vmcb->_guest_asid; */
+    /* ASID - Emulation handled in hvm_asid_handle_vmenter() */
 
     /* TLB control */
     n2vmcb->tlb_control = n1vmcb->tlb_control | ns_vmcb->tlb_control;
 
@@ -605,9 +601,13 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs,
     svm->ns_vmcb_guestcr3 = ns_vmcb->_cr3;
     svm->ns_vmcb_hostcr3 = ns_vmcb->_h_cr3;
 
-    nv->nv_flushp2m = (ns_vmcb->tlb_control
-        || (svm->ns_guest_asid != ns_vmcb->_guest_asid));
-    svm->ns_guest_asid = ns_vmcb->_guest_asid;
+    nv->nv_flushp2m = ns_vmcb->tlb_control;
+    if ( svm->ns_guest_asid != ns_vmcb->_guest_asid )
+    {
+        nv->nv_flushp2m = 1;
+        hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
+        svm->ns_guest_asid = ns_vmcb->_guest_asid;
+    }
 
     /* nested paging for the guest */
     svm->ns_hap_enabled = (ns_vmcb->_np_enable) ? 1 : 0;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 981e5c3217..b924c1e46f 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1580,6 +1580,15 @@ static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs)
     __update_guest_eip(regs, inst_len);
 }
 
+static void svm_invlpga_intercept(
+    struct vcpu *v, unsigned long vaddr, uint32_t asid)
+{
+    svm_invlpga(vaddr,
+                (asid == 0)
+                ? v->arch.hvm_vcpu.n1asid.asid
+                : vcpu_nestedhvm(v).nv_n2asid.asid);
+}
+
 static void svm_invlpg_intercept(unsigned long vaddr)
 {
     struct vcpu *curr = current;
@@ -1894,11 +1903,14 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
     case VMEXIT_CR0_READ ... VMEXIT_CR15_READ:
     case VMEXIT_CR0_WRITE ... VMEXIT_CR15_WRITE:
     case VMEXIT_INVLPG:
-    case VMEXIT_INVLPGA:
         if ( !handle_mmio() )
             hvm_inject_exception(TRAP_gp_fault, 0, 0);
         break;
 
+    case VMEXIT_INVLPGA:
+        svm_invlpga_intercept(v, regs->rax, regs->ecx);
+        break;
+
     case VMEXIT_VMMCALL:
         if ( (inst_len = __get_instruction_length(v, INSTR_VMCALL)) == 0 )
             break;
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 9b9921cf40..d139c1380d 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -867,9 +867,6 @@ static int construct_vmcs(struct vcpu *v)
 #endif
     }
 
-    if ( cpu_has_vmx_vpid )
-        __vmwrite(VIRTUAL_PROCESSOR_ID, v->arch.hvm_vcpu.asid);
-
     if ( cpu_has_vmx_pat && paging_mode_hap(d) )
     {
         u64 host_pat, guest_pat;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index cda6420f39..2f4c74e2b4 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2667,14 +2667,16 @@ asmlinkage void vmx_vmenter_helper(void)
 {
     struct vcpu *curr = current;
     u32 new_asid, old_asid;
+    struct hvm_vcpu_asid *p_asid;
     bool_t need_flush;
 
     if ( !cpu_has_vmx_vpid )
         goto out;
 
-    old_asid = curr->arch.hvm_vcpu.asid;
-    need_flush = hvm_asid_handle_vmenter();
-    new_asid = curr->arch.hvm_vcpu.asid;
+    p_asid = &curr->arch.hvm_vcpu.n1asid;
+    old_asid = p_asid->asid;
+    need_flush = hvm_asid_handle_vmenter(p_asid);
+    new_asid = p_asid->asid;
 
     if ( unlikely(new_asid != old_asid) )
     {
diff --git a/xen/include/asm-x86/hvm/asid.h b/xen/include/asm-x86/hvm/asid.h
index 4ee520f1db..a01ebeb676 100644
--- a/xen/include/asm-x86/hvm/asid.h
+++ b/xen/include/asm-x86/hvm/asid.h
@@ -23,11 +23,15 @@
 #include <xen/config.h>
 
 struct vcpu;
+struct hvm_vcpu_asid;
 
 /* Initialise ASID management for the current physical CPU. */
 void hvm_asid_init(int nasids);
 
-/* Invalidate a VCPU's current ASID allocation: forces re-allocation. */
+/* Invalidate a particular ASID allocation: forces re-allocation. */
+void hvm_asid_flush_vcpu_asid(struct hvm_vcpu_asid *asid);
+
+/* Invalidate all ASID allocations for specified VCPU: forces re-allocation. */
 void hvm_asid_flush_vcpu(struct vcpu *v);
 
 /* Flush all ASIDs on this processor core. */
@@ -35,7 +39,7 @@ void hvm_asid_flush_core(void);
 
 /* Called before entry to guest context. Checks ASID allocation, returns a
  * boolean indicating whether all ASIDs must be flushed. */
-bool_t hvm_asid_handle_vmenter(void);
+bool_t hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid);
 
 #endif /* __ASM_X86_HVM_ASID_H__ */
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index eabecaaccc..0282c01c3c 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -37,6 +37,11 @@ enum hvm_io_state {
     HVMIO_completed
 };
 
+struct hvm_vcpu_asid {
+    uint64_t generation;
+    uint32_t asid;
+};
+
 #define VMCX_EADDR (~0ULL)
 
 struct nestedvcpu {
@@ -57,6 +62,8 @@ struct nestedvcpu {
     bool_t nv_flushp2m; /* True, when p2m table must be flushed */
     struct p2m_domain *nv_p2m; /* used p2m table for this vcpu */
 
+    struct hvm_vcpu_asid nv_n2asid;
+
     bool_t nv_vmentry_pending;
     bool_t nv_vmexit_pending;
     bool_t nv_vmswitch_in_progress; /* true during vmentry/vmexit emulation */
@@ -100,8 +107,7 @@ struct hvm_vcpu {
     bool_t hcall_preempted;
     bool_t hcall_64bit;
 
-    uint64_t asid_generation;
-    uint32_t asid;
+    struct hvm_vcpu_asid n1asid;
 
     u32 msr_tsc_aux;
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 8e685e4dc6..23406fa230 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -377,7 +377,7 @@ static inline void vpid_sync_vcpu_gva(struct vcpu *v, unsigned long gva)
         type = INVVPID_ALL_CONTEXT;
 
  execute_invvpid:
-    __invvpid(type, v->arch.hvm_vcpu.asid, (u64)gva);
+    __invvpid(type, v->arch.hvm_vcpu.n1asid.asid, (u64)gva);
 }
 
 static inline void vpid_sync_all(void)