author     Keir Fraser <keir.fraser@citrix.com>   2009-12-08 14:14:27 +0000
committer  Keir Fraser <keir.fraser@citrix.com>   2009-12-08 14:14:27 +0000
commit     7aab6d6523ecc31156d3f36f55e599e0c4f73325
tree       fb60fea0540e31cd8ae66b1c044f45024b673915
parent     c4a0c89b8c23b3e9f5b2ab9ad72a264b48774811
hvm: Share ASID logic between VMX and SVM.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
-rw-r--r--  xen/arch/x86/hvm/asid.c               6
-rw-r--r--  xen/arch/x86/hvm/hvm.c                2
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c           13
-rw-r--r--  xen/arch/x86/hvm/svm/vmcb.c           3
-rw-r--r--  xen/arch/x86/hvm/vmx/entry.S          2
-rw-r--r--  xen/arch/x86/hvm/vmx/vmcs.c          12
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c          109
-rw-r--r--  xen/include/asm-x86/hvm/asid.h        6
-rw-r--r--  xen/include/asm-x86/hvm/hvm.h        18
-rw-r--r--  xen/include/asm-x86/hvm/svm/asid.h    2
-rw-r--r--  xen/include/asm-x86/hvm/vmx/vmcs.h    2
-rw-r--r--  xen/include/asm-x86/hvm/vmx/vmx.h    12
12 files changed, 72 insertions(+), 115 deletions(-)
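
Both vendors now share the generation-based allocator in xen/arch/x86/hvm/asid.c,
of which only fragments appear in the hunks below; in particular, the
hvm_asid_handle_vmenter() called from the new vmx_vmenter_helper() is not shown
at all. As context, here is a minimal sketch of how such a per-core,
generation-rolling allocator works. Names beyond those visible in this diff
(hvm_asid_init, hvm_asid_flush_core, hvm_asid_flush_vcpu, next_asid,
asid_generation, asid), notably struct hvm_asid_data, core_asid_generation and
max_asid, are assumptions for illustration, not the literal file contents.

#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/percpu.h>
#include <asm/hvm/asid.h>

/* Hypothetical per-core state for the shared ASID allocator. */
struct hvm_asid_data {
    u64 core_asid_generation;   /* bumped on every core-wide flush   */
    u32 next_asid;              /* next free ASID in this generation */
    u32 max_asid;               /* highest usable ASID; 0 = disabled */
};

static DEFINE_PER_CPU(struct hvm_asid_data, hvm_asid_data);

void hvm_asid_init(int nasids)
{
    struct hvm_asid_data *data = &this_cpu(hvm_asid_data);

    data->max_asid = (nasids > 1) ? (nasids - 1) : 0;

    /* Generation 0 means 'invalid': hvm_asid_flush_vcpu() relies on this. */
    data->core_asid_generation = 1;

    /* ASID/VPID 0 is reserved for the hypervisor itself. */
    data->next_asid = 1;
}

void hvm_asid_flush_core(void)
{
    struct hvm_asid_data *data = &this_cpu(hvm_asid_data);

    /* Moving to a new generation invalidates every vCPU's cached ASID. */
    data->core_asid_generation++;
    data->next_asid = 1;
}

/* Returns 1 iff the TLB must be flushed before the next guest entry. */
bool_t hvm_asid_handle_vmenter(void)
{
    struct vcpu *curr = current;
    struct hvm_asid_data *data = &this_cpu(hvm_asid_data);

    /* ASIDs disabled on this core: run every guest with ASID 0. */
    if ( data->max_asid == 0 )
    {
        curr->arch.hvm_vcpu.asid = 0;
        return 0;
    }

    /* Current ASID still belongs to the live generation: nothing to do. */
    if ( curr->arch.hvm_vcpu.asid_generation == data->core_asid_generation )
        return 0;

    /* Out of ASIDs: open a fresh generation. */
    if ( unlikely(data->next_asid > data->max_asid) )
        hvm_asid_flush_core();

    curr->arch.hvm_vcpu.asid_generation = data->core_asid_generation;
    curr->arch.hvm_vcpu.asid = data->next_asid++;

    /* First allocation in a generation: stale translations may remain. */
    return (curr->arch.hvm_vcpu.asid == 1);
}

The vmx.c hunk below shows how the returned flag is consumed: vmx_vmenter_helper()
writes the (possibly new) ASID into the VPID field, toggles
SECONDARY_EXEC_ENABLE_VPID as needed, and falls back to vpid_sync_all() when a
flush is required.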
diff --git a/xen/arch/x86/hvm/asid.c b/xen/arch/x86/hvm/asid.c
index 85754d18bb..69f3f577f8 100644
--- a/xen/arch/x86/hvm/asid.c
+++ b/xen/arch/x86/hvm/asid.c
@@ -20,7 +20,9 @@
#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
-#include <xen/perfc.h>
+#include <xen/sched.h>
+#include <xen/smp.h>
+#include <xen/percpu.h>
#include <asm/hvm/asid.h>
/*
@@ -80,7 +82,7 @@ void hvm_asid_init(int nasids)
data->next_asid = 1;
}
-void hvm_asid_invalidate_asid(struct vcpu *v)
+void hvm_asid_flush_vcpu(struct vcpu *v)
{
v->arch.hvm_vcpu.asid_generation = 0;
}
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 42121fec80..d2e8a7162e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -756,6 +756,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
{
int rc;
+ hvm_asid_flush_vcpu(v);
+
if ( cpu_has_xsave )
{
/* XSAVE/XRSTOR requires the save area be 64-byte-boundary aligned. */
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 7695f3b831..c297b0b509 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -424,7 +424,7 @@ static void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
break;
case 3:
vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
- hvm_asid_invalidate_asid(v);
+ hvm_asid_flush_vcpu(v);
break;
case 4:
vmcb->cr4 = HVM_CR4_HOST_MASK;
@@ -455,14 +455,6 @@ static void svm_update_guest_efer(struct vcpu *v)
svm_intercept_msr(v, MSR_IA32_SYSENTER_EIP, lma);
}
-static void svm_flush_guest_tlbs(void)
-{
- /* Roll over the CPU's ASID generation, so it gets a clean TLB when we
- * next VMRUN. (If ASIDs are disabled, the whole TLB is flushed on
- * VMRUN anyway). */
- hvm_asid_flush_core();
-}
-
static void svm_sync_vmcb(struct vcpu *v)
{
struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
@@ -704,7 +696,7 @@ static void svm_do_resume(struct vcpu *v)
hvm_migrate_timers(v);
/* Migrating to another ASID domain. Request a new ASID. */
- hvm_asid_invalidate_asid(v);
+ hvm_asid_flush_vcpu(v);
}
/* Reflect the vlapic's TPR in the hardware vtpr */
@@ -1250,7 +1242,6 @@ static struct hvm_function_table __read_mostly svm_function_table = {
.update_host_cr3 = svm_update_host_cr3,
.update_guest_cr = svm_update_guest_cr,
.update_guest_efer = svm_update_guest_efer,
- .flush_guest_tlbs = svm_flush_guest_tlbs,
.set_tsc_offset = svm_set_tsc_offset,
.inject_exception = svm_inject_exception,
.init_hypercall_page = svm_init_hypercall_page,
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 71302cc194..9218028221 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -114,9 +114,6 @@ static int construct_vmcb(struct vcpu *v)
struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
struct vmcb_struct *vmcb = arch_svm->vmcb;
- /* TLB control, and ASID assignment. */
- hvm_asid_invalidate_asid(v);
-
vmcb->general1_intercepts =
GENERAL1_INTERCEPT_INTR | GENERAL1_INTERCEPT_NMI |
GENERAL1_INTERCEPT_SMI | GENERAL1_INTERCEPT_INIT |
diff --git a/xen/arch/x86/hvm/vmx/entry.S b/xen/arch/x86/hvm/vmx/entry.S
index 8720efccee..9fb7ecb97c 100644
--- a/xen/arch/x86/hvm/vmx/entry.S
+++ b/xen/arch/x86/hvm/vmx/entry.S
@@ -142,9 +142,9 @@ vmx_asm_do_vmentry:
call_with_regs(vmx_enter_realmode)
.Lvmx_not_realmode:
+ call vmx_vmenter_helper
mov VCPU_hvm_guest_cr2(r(bx)),r(ax)
mov r(ax),%cr2
- call vmx_trace_vmentry
lea UREGS_rip(r(sp)),r(di)
mov $GUEST_RIP,%eax
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index bcbf7ccb65..8c974b79ca 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -400,9 +400,12 @@ int vmx_cpu_up(void)
BUG();
}
+ hvm_asid_init(cpu_has_vmx_vpid ? (1u << VMCS_VPID_WIDTH) : 0);
+
ept_sync_all();
- vpid_sync_all();
+ if ( cpu_has_vmx_vpid )
+ vpid_sync_all();
return 1;
}
@@ -559,6 +562,9 @@ static int construct_vmcs(struct vcpu *v)
v->arch.hvm_vmx.secondary_exec_control = vmx_secondary_exec_control;
+ /* Disable VPID for now: we decide when to enable it on VMENTER. */
+ v->arch.hvm_vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
+
if ( paging_mode_hap(d) )
{
v->arch.hvm_vmx.exec_control &= ~(CPU_BASED_INVLPG_EXITING |
@@ -736,7 +742,7 @@ static int construct_vmcs(struct vcpu *v)
}
if ( cpu_has_vmx_vpid )
- __vmwrite(VIRTUAL_PROCESSOR_ID, v->arch.hvm_vmx.vpid);
+ __vmwrite(VIRTUAL_PROCESSOR_ID, v->arch.hvm_vcpu.asid);
if ( cpu_has_vmx_pat && paging_mode_hap(d) )
{
@@ -946,7 +952,7 @@ void vmx_do_resume(struct vcpu *v)
hvm_migrate_timers(v);
hvm_migrate_pirqs(v);
vmx_set_host_env(v);
- vpid_sync_vcpu_all(v);
+ hvm_asid_flush_vcpu(v);
}
debug_state = v->domain->debugger_attached;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 20c30c5612..e2b55a5a04 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -60,8 +60,6 @@ static void vmx_ctxt_switch_to(struct vcpu *v);
static int vmx_alloc_vlapic_mapping(struct domain *d);
static void vmx_free_vlapic_mapping(struct domain *d);
-static int vmx_alloc_vpid(struct vcpu *v);
-static void vmx_free_vpid(struct vcpu *v);
static void vmx_install_vlapic_mapping(struct vcpu *v);
static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr);
static void vmx_update_guest_efer(struct vcpu *v);
@@ -104,9 +102,6 @@ static int vmx_vcpu_initialise(struct vcpu *v)
spin_lock_init(&v->arch.hvm_vmx.vmcs_lock);
- if ( (rc = vmx_alloc_vpid(v)) != 0 )
- return rc;
-
v->arch.schedule_tail = vmx_do_resume;
v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
v->arch.ctxt_switch_to = vmx_ctxt_switch_to;
@@ -116,7 +111,6 @@ static int vmx_vcpu_initialise(struct vcpu *v)
dprintk(XENLOG_WARNING,
"Failed to create VMCS for vcpu %d: err=%d.\n",
v->vcpu_id, rc);
- vmx_free_vpid(v);
return rc;
}
@@ -136,7 +130,6 @@ static void vmx_vcpu_destroy(struct vcpu *v)
vmx_destroy_vmcs(v);
vpmu_destroy(v);
passive_domain_destroy(v);
- vmx_free_vpid(v);
}
#ifdef __x86_64__
@@ -1168,7 +1161,7 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
}
__vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
- vpid_sync_vcpu_all(v);
+ hvm_asid_flush_vcpu(v);
break;
case 4:
v->arch.hvm_vcpu.hw_cr[4] = HVM_CR4_HOST_MASK;
@@ -1214,19 +1207,6 @@ static void vmx_update_guest_efer(struct vcpu *v)
(v->arch.hvm_vcpu.guest_efer & EFER_SCE));
}
-static void vmx_flush_guest_tlbs(void)
-{
- /*
- * If VPID (i.e. tagged TLB support) is not enabled, the fact that
- * we're in Xen at all means any guest will have a clean TLB when
- * it's next run, because VMRESUME will flush it for us.
- *
- * If enabled, we invalidate all translations associated with all
- * VPID values.
- */
- vpid_sync_all();
-}
-
static void __ept_sync_domain(void *info)
{
struct domain *d = info;
@@ -1358,7 +1338,7 @@ static void vmx_set_uc_mode(struct vcpu *v)
if ( paging_mode_hap(v->domain) )
ept_change_entry_emt_with_range(
v->domain, 0, v->domain->arch.p2m->max_mapped_pfn);
- vpid_sync_all();
+ hvm_asid_flush_vcpu(v);
}
static void vmx_set_info_guest(struct vcpu *v)
@@ -1405,7 +1385,6 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
.update_host_cr3 = vmx_update_host_cr3,
.update_guest_cr = vmx_update_guest_cr,
.update_guest_efer = vmx_update_guest_efer,
- .flush_guest_tlbs = vmx_flush_guest_tlbs,
.set_tsc_offset = vmx_set_tsc_offset,
.inject_exception = vmx_inject_exception,
.init_hypercall_page = vmx_init_hypercall_page,
@@ -1424,9 +1403,6 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
.set_rdtsc_exiting = vmx_set_rdtsc_exiting
};
-static unsigned long *vpid_bitmap;
-#define VPID_BITMAP_SIZE (1u << VMCS_VPID_WIDTH)
-
void start_vmx(void)
{
static bool_t bootstrapped;
@@ -1461,17 +1437,6 @@ void start_vmx(void)
if ( cpu_has_vmx_ept )
vmx_function_table.hap_supported = 1;
- if ( cpu_has_vmx_vpid )
- {
- vpid_bitmap = xmalloc_array(
- unsigned long, BITS_TO_LONGS(VPID_BITMAP_SIZE));
- BUG_ON(vpid_bitmap == NULL);
- memset(vpid_bitmap, 0, BITS_TO_LONGS(VPID_BITMAP_SIZE) * sizeof(long));
-
- /* VPID 0 is used by VMX root mode (the hypervisor). */
- __set_bit(0, vpid_bitmap);
- }
-
setup_vmcs_dump();
hvm_enable(&vmx_function_table);
@@ -1584,7 +1549,7 @@ static void vmx_invlpg_intercept(unsigned long vaddr)
{
struct vcpu *curr = current;
HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
- if ( paging_invlpg(curr, vaddr) )
+ if ( paging_invlpg(curr, vaddr) && cpu_has_vmx_vpid )
vpid_sync_vcpu_gva(curr, vaddr);
}
@@ -1931,36 +1896,6 @@ static void vmx_free_vlapic_mapping(struct domain *d)
free_xenheap_page(mfn_to_virt(mfn));
}
-static int vmx_alloc_vpid(struct vcpu *v)
-{
- int idx;
-
- if ( !cpu_has_vmx_vpid )
- return 0;
-
- do {
- idx = find_first_zero_bit(vpid_bitmap, VPID_BITMAP_SIZE);
- if ( idx >= VPID_BITMAP_SIZE )
- {
- dprintk(XENLOG_WARNING, "VMX VPID space exhausted.\n");
- return -EBUSY;
- }
- }
- while ( test_and_set_bit(idx, vpid_bitmap) );
-
- v->arch.hvm_vmx.vpid = idx;
- return 0;
-}
-
-static void vmx_free_vpid(struct vcpu *v)
-{
- if ( !cpu_has_vmx_vpid )
- return;
-
- if ( v->arch.hvm_vmx.vpid )
- clear_bit(v->arch.hvm_vmx.vpid, vpid_bitmap);
-}
-
static void vmx_install_vlapic_mapping(struct vcpu *v)
{
paddr_t virt_page_ma, apic_page_ma;
@@ -2675,8 +2610,44 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
}
}
-asmlinkage void vmx_trace_vmentry(void)
+asmlinkage void vmx_vmenter_helper(void)
{
+ struct vcpu *curr = current;
+ u32 new_asid, old_asid;
+ bool_t need_flush;
+
+ if ( !cpu_has_vmx_vpid )
+ goto out;
+
+ old_asid = curr->arch.hvm_vcpu.asid;
+ need_flush = hvm_asid_handle_vmenter();
+ new_asid = curr->arch.hvm_vcpu.asid;
+
+ if ( unlikely(new_asid != old_asid) )
+ {
+ __vmwrite(VIRTUAL_PROCESSOR_ID, new_asid);
+ if ( !old_asid && new_asid )
+ {
+ /* VPID was disabled: now enabled. */
+ curr->arch.hvm_vmx.secondary_exec_control |=
+ SECONDARY_EXEC_ENABLE_VPID;
+ __vmwrite(SECONDARY_VM_EXEC_CONTROL,
+ curr->arch.hvm_vmx.secondary_exec_control);
+ }
+ else if ( old_asid && !new_asid )
+ {
+ /* VPID was enabled: now disabled. */
+ curr->arch.hvm_vmx.secondary_exec_control &=
+ ~SECONDARY_EXEC_ENABLE_VPID;
+ __vmwrite(SECONDARY_VM_EXEC_CONTROL,
+ curr->arch.hvm_vmx.secondary_exec_control);
+ }
+ }
+
+ if ( unlikely(need_flush) )
+ vpid_sync_all();
+
+ out:
HVMTRACE_ND (VMENTRY, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
}
diff --git a/xen/include/asm-x86/hvm/asid.h b/xen/include/asm-x86/hvm/asid.h
index 336e61dd54..4ee520f1db 100644
--- a/xen/include/asm-x86/hvm/asid.h
+++ b/xen/include/asm-x86/hvm/asid.h
@@ -21,14 +21,14 @@
#define __ASM_X86_HVM_ASID_H__
#include <xen/config.h>
-#include <xen/sched.h>
-#include <asm/processor.h>
+
+struct vcpu;
/* Initialise ASID management for the current physical CPU. */
void hvm_asid_init(int nasids);
/* Invalidate a VCPU's current ASID allocation: forces re-allocation. */
-void hvm_asid_invalidate_asid(struct vcpu *v);
+void hvm_asid_flush_vcpu(struct vcpu *v);
/* Flush all ASIDs on this processor core. */
void hvm_asid_flush_core(void);
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index cbff2e0a0d..8643f92926 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -23,6 +23,7 @@
#include <asm/current.h>
#include <asm/x86_emulate.h>
+#include <asm/hvm/asid.h>
#include <public/domctl.h>
#include <public/hvm/save.h>
@@ -100,13 +101,6 @@ struct hvm_function_table {
void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
void (*update_guest_efer)(struct vcpu *v);
- /*
- * Called to ensure that all guest-specific mappings in a tagged TLB
- * are flushed; does *not* flush Xen's TLB entries, and on
- * processors without a tagged TLB it will be a noop.
- */
- void (*flush_guest_tlbs)(void);
-
void (*set_tsc_offset)(struct vcpu *v, u64 offset);
void (*inject_exception)(unsigned int trapnr, int errcode,
@@ -201,11 +195,15 @@ static inline void hvm_update_guest_efer(struct vcpu *v)
hvm_funcs.update_guest_efer(v);
}
-static inline void
-hvm_flush_guest_tlbs(void)
+/*
+ * Called to ensure that all guest-specific mappings in a tagged TLB are
+ * flushed; does *not* flush Xen's TLB entries, and on processors without a
+ * tagged TLB it will be a noop.
+ */
+static inline void hvm_flush_guest_tlbs(void)
{
if ( hvm_enabled )
- hvm_funcs.flush_guest_tlbs();
+ hvm_asid_flush_core();
}
void hvm_hypercall_page_initialise(struct domain *d,
diff --git a/xen/include/asm-x86/hvm/svm/asid.h b/xen/include/asm-x86/hvm/svm/asid.h
index 25a8835e7e..a484f3eff3 100644
--- a/xen/include/asm-x86/hvm/svm/asid.h
+++ b/xen/include/asm-x86/hvm/svm/asid.h
@@ -41,7 +41,7 @@ static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
#endif
/* Safe fallback. Take a new ASID. */
- hvm_asid_invalidate_asid(v);
+ hvm_asid_flush_vcpu(v);
}
#endif /* __ASM_X86_HVM_SVM_ASID_H__ */
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index f7c3c78cf7..a24985bd64 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -90,8 +90,6 @@ struct arch_vmx_struct {
u32 exec_control;
u32 secondary_exec_control;
- u16 vpid;
-
/* PMU */
struct vpmu_struct vpmu;
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index ddda6c09aa..8894dbf6d8 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -314,20 +314,12 @@ void ept_sync_domain(struct domain *d);
static inline void vpid_sync_vcpu_gva(struct vcpu *v, unsigned long gva)
{
- if ( cpu_has_vmx_vpid )
- __invvpid(0, v->arch.hvm_vmx.vpid, (u64)gva);
-}
-
-static inline void vpid_sync_vcpu_all(struct vcpu *v)
-{
- if ( cpu_has_vmx_vpid )
- __invvpid(1, v->arch.hvm_vmx.vpid, 0);
+ __invvpid(0, v->arch.hvm_vcpu.asid, (u64)gva);
}
static inline void vpid_sync_all(void)
{
- if ( cpu_has_vmx_vpid )
- __invvpid(2, 0, 0);
+ __invvpid(2, 0, 0);
}
static inline void __vmxoff(void)