-rw-r--r--  xen/arch/x86/hvm/svm/vpmu.c               | 37
-rw-r--r--  xen/arch/x86/hvm/vmx/vpmu_core2.c         | 30
-rw-r--r--  xen/arch/x86/hvm/vpmu.c                   | 50
-rw-r--r--  xen/include/asm-x86/hvm/vmx/vpmu_core2.h  |  1
-rw-r--r--  xen/include/asm-x86/hvm/vpmu.h            |  1
5 files changed, 47 insertions(+), 72 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index 51e5495bc6..f2f2d125fd 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -87,7 +87,6 @@ static const u32 AMD_F15H_CTRLS[] = {
 struct amd_vpmu_context {
     u64 counters[MAX_NUM_COUNTERS];
     u64 ctrls[MAX_NUM_COUNTERS];
-    u32 hw_lapic_lvtpc;
     bool_t msr_bitmap_set;
 };
 
@@ -171,22 +170,6 @@ static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
 
 static int amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
-    struct vcpu *v = current;
-    struct vlapic *vlapic = vcpu_vlapic(v);
-    u32 vlapic_lvtpc;
-    unsigned char int_vec;
-
-    if ( !is_vlapic_lvtpc_enabled(vlapic) )
-        return 0;
-
-    vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);
-    int_vec = vlapic_lvtpc & APIC_VECTOR_MASK;
-
-    if ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) == APIC_MODE_FIXED )
-        vlapic_set_irq(vcpu_vlapic(v), int_vec, 0);
-    else
-        v->nmi_pending = 1;
-
     return 1;
 }
 
@@ -205,17 +188,7 @@ static inline void context_restore(struct vcpu *v)
 
 static void amd_vpmu_restore(struct vcpu *v)
 {
-    struct vpmu_struct *vpmu = vcpu_vpmu(v);
-    struct amd_vpmu_context *ctxt = vpmu->context;
-
-    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
-           vpmu_is_set(vpmu, VPMU_RUNNING)) )
-        return;
-
-    apic_write(APIC_LVTPC, ctxt->hw_lapic_lvtpc);
     context_restore(v);
-
-    vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
 }
 
 static inline void context_save(struct vcpu *v)
@@ -237,18 +210,10 @@ static void amd_vpmu_save(struct vcpu *v)
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct amd_vpmu_context *ctx = vpmu->context;
 
-    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
-           vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
-        return;
-
     context_save(v);
 
     if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && ctx->msr_bitmap_set )
         amd_vpmu_unset_msr_bitmap(v);
-
-    ctx->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
-    apic_write(APIC_LVTPC, ctx->hw_lapic_lvtpc | APIC_LVT_MASKED);
-    vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
 }
 
 static void context_update(unsigned int msr, u64 msr_content)
@@ -271,8 +236,6 @@ static void context_update(unsigned int msr, u64 msr_content)
     for ( i = 0; i < num_counters; i++ )
         if ( msr == ctrls[i] )
             ctxt->ctrls[i] = msr_content;
-
-    ctxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
 }
 
 static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index 7c86a0b6b1..6195bfc477 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -305,25 +305,18 @@ static inline void __core2_vpmu_save(struct vcpu *v)
         rdmsrl(core2_fix_counters.msr[i], core2_vpmu_cxt->fix_counters[i]);
     for ( i = 0; i < core2_get_pmc_count(); i++ )
         rdmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter);
-    core2_vpmu_cxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
-    apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
 }
 
 static void core2_vpmu_save(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
-           vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
-        return;
-
     __core2_vpmu_save(v);
 
     /* Unset PMU MSR bitmap to trap lazy load. */
     if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && cpu_has_vmx_msr_bitmap )
         core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
 
-    vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
     return;
 }
 
@@ -341,20 +334,11 @@ static inline void __core2_vpmu_load(struct vcpu *v)
         wrmsrl(core2_ctrls.msr[i], core2_vpmu_cxt->ctrls[i]);
     for ( i = 0; i < core2_get_pmc_count(); i++ )
         wrmsrl(MSR_P6_EVNTSEL0+i, core2_vpmu_cxt->arch_msr_pair[i].control);
-
-    apic_write_around(APIC_LVTPC, core2_vpmu_cxt->hw_lapic_lvtpc);
 }
 
 static void core2_vpmu_load(struct vcpu *v)
 {
-    struct vpmu_struct *vpmu = vcpu_vpmu(v);
-
-    /* Only when PMU is counting, we load PMU context immediately. */
-    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
-           vpmu_is_set(vpmu, VPMU_RUNNING)) )
-        return;
     __core2_vpmu_load(v);
-    vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
 }
 
 static int core2_vpmu_alloc_resource(struct vcpu *v)
@@ -705,11 +689,8 @@ static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
     u64 msr_content;
-    u32 vlapic_lvtpc;
-    unsigned char int_vec;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct core2_vpmu_context *core2_vpmu_cxt = vpmu->context;
-    struct vlapic *vlapic = vcpu_vlapic(v);
 
     rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, msr_content);
     if ( msr_content )
@@ -728,18 +709,9 @@ static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
         return 0;
     }
 
+    /* HW sets the MASK bit when a performance counter interrupt occurs. */
     apic_write_around(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
-
-    if ( !is_vlapic_lvtpc_enabled(vlapic) )
-        return 1;
-
-    vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);
-    int_vec = vlapic_lvtpc & APIC_VECTOR_MASK;
-    vlapic_set_reg(vlapic, APIC_LVTPC, vlapic_lvtpc | APIC_LVT_MASKED);
-    if ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) == APIC_MODE_FIXED )
-        vlapic_set_irq(vcpu_vlapic(v), int_vec, 0);
-    else
-        v->nmi_pending = 1;
 
     return 1;
 }
 
diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index 3b6958081e..ff5f065c3a 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -31,7 +31,7 @@
 #include <asm/hvm/vpmu.h>
 #include <asm/hvm/svm/svm.h>
 #include <asm/hvm/svm/vmcb.h>
-
+#include <asm/apic.h>
 
 /*
  * "vpmu" : vpmu generally enabled
@@ -83,10 +83,31 @@ int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 
 int vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
-    struct vpmu_struct *vpmu = vcpu_vpmu(current);
+    struct vcpu *v = current;
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+
+    if ( vpmu->arch_vpmu_ops )
+    {
+        struct vlapic *vlapic = vcpu_vlapic(v);
+        u32 vlapic_lvtpc;
+        unsigned char int_vec;
+
+        if ( !vpmu->arch_vpmu_ops->do_interrupt(regs) )
+            return 0;
+
+        if ( !is_vlapic_lvtpc_enabled(vlapic) )
+            return 1;
+
+        vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);
+        int_vec = vlapic_lvtpc & APIC_VECTOR_MASK;
+
+        if ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) == APIC_MODE_FIXED )
+            vlapic_set_irq(vcpu_vlapic(v), int_vec, 0);
+        else
+            v->nmi_pending = 1;
+        return 1;
+    }
 
-    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_interrupt )
-        return vpmu->arch_vpmu_ops->do_interrupt(regs);
     return 0;
 }
 
@@ -104,16 +125,35 @@ void vpmu_save(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_save )
+    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
+           vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
+        return;
+
+    if ( vpmu->arch_vpmu_ops )
         vpmu->arch_vpmu_ops->arch_vpmu_save(v);
+
+    vpmu->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
+    apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
+
+    vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
 }
 
 void vpmu_load(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
+    /* Only when the PMU is counting do we load its context immediately. */
+    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
+           vpmu_is_set(vpmu, VPMU_RUNNING)) )
+        return;
+
     if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load )
+    {
+        apic_write_around(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
         vpmu->arch_vpmu_ops->arch_vpmu_load(v);
+    }
+
+    vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
 }
 
 void vpmu_initialise(struct vcpu *v)
diff --git a/xen/include/asm-x86/hvm/vmx/vpmu_core2.h b/xen/include/asm-x86/hvm/vmx/vpmu_core2.h
index 4128f2ac24..60b05fd9bc 100644
--- a/xen/include/asm-x86/hvm/vmx/vpmu_core2.h
+++ b/xen/include/asm-x86/hvm/vmx/vpmu_core2.h
@@ -44,7 +44,6 @@ struct core2_vpmu_context {
     u64 fix_counters[VPMU_CORE2_NUM_FIXED];
     u64 ctrls[VPMU_CORE2_NUM_CTRLS];
     u64 global_ovf_status;
-    u32 hw_lapic_lvtpc;
     struct arch_msr_pair arch_msr_pair[1];
 };
 
diff --git a/xen/include/asm-x86/hvm/vpmu.h b/xen/include/asm-x86/hvm/vpmu.h
index cd31f5eb0b..01be97683e 100644
--- a/xen/include/asm-x86/hvm/vpmu.h
+++ b/xen/include/asm-x86/hvm/vpmu.h
@@ -62,6 +62,7 @@ int svm_vpmu_initialise(struct vcpu *, unsigned int flags);
 
 struct vpmu_struct {
     u32 flags;
+    u32 hw_lapic_lvtpc;
     void *context;
     struct arch_vpmu_ops *arch_vpmu_ops;
 };
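
The net effect of the hunks above is easier to read as plain code than as a diff: the vendor do_interrupt() hooks are reduced to acknowledging the physical PMU (the AMD hook now simply returns 1, while the Intel hook checks MSR_CORE_PERF_GLOBAL_STATUS and unmasks the host LVTPC), and guest delivery plus LVTPC context switching now happen exactly once, in common code. The sketch below mirrors the '+' lines of the xen/arch/x86/hvm/vpmu.c hunk, lightly condensed (the int_vec temporary is folded into the vlapic_set_irq() call) and with explanatory comments added; it is an illustration of the new split of responsibilities, not a verbatim copy of the Xen tree.

int vpmu_do_interrupt(struct cpu_user_regs *regs)
{
    struct vcpu *v = current;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    if ( vpmu->arch_vpmu_ops )
    {
        struct vlapic *vlapic = vcpu_vlapic(v);
        u32 vlapic_lvtpc;

        /* Vendor hook: acknowledge/unmask the physical PMU only. */
        if ( !vpmu->arch_vpmu_ops->do_interrupt(regs) )
            return 0;

        /* Common policy: forward the event via the guest's virtual LAPIC. */
        if ( !is_vlapic_lvtpc_enabled(vlapic) )
            return 1;

        vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);
        if ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) == APIC_MODE_FIXED )
            /* Fixed mode: inject the vector the guest programmed. */
            vlapic_set_irq(vlapic, vlapic_lvtpc & APIC_VECTOR_MASK, 0);
        else
            /* Otherwise deliver as an NMI. */
            v->nmi_pending = 1;

        return 1;
    }

    return 0;
}

In the same spirit, vpmu_save() and vpmu_load() now bracket the vendor arch_vpmu_save/arch_vpmu_load hooks with the single hw_lapic_lvtpc save/mask/restore sequence that each backend previously duplicated, which is what allows the per-vendor hw_lapic_lvtpc fields to be dropped in favour of the one added to the common struct vpmu_struct.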