author     Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>    2013-04-08 17:55:49 +0200
committer  Jan Beulich <jbeulich@suse.com>               2013-04-08 17:55:49 +0200
commit     7ad0f74356cb45d5904cc3726fb552cecc9fe16f (patch)
tree       5aed132d4db9578d3157b6383e677392e994a3bf
parent     5753fe1c92efb3cc0142dadf8c82d5fb9ea80418 (diff)
vpmu intel: Better names and replacing numerals with defines
This patch renames core2_counters to core2_fix_counters to make the code easier to understand, and substitutes two numerals with defines in the fixed counter handling.

Signed-off-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Acked-by: Jun Nakajima <jun.nakajima@intel.com>
-rw-r--r--  xen/arch/x86/hvm/vmx/vpmu_core2.c         | 42
-rw-r--r--  xen/include/asm-x86/hvm/vmx/vpmu_core2.h  | 11
2 files changed, 29 insertions(+), 24 deletions(-)
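For readers skimming the diff below, the change boils down to naming the fixed-counter MSR table explicitly and bounding its loops with a define instead of the literal 3. The following stand-alone sketch only illustrates that pattern; it is not the Xen code itself, and the MSR addresses, the stub context structure, and the printf output are illustrative placeholders.

/*
 * Minimal illustrative sketch of the pattern applied by this patch:
 * the fixed-counter MSR table gets an explicit name and its loops are
 * bounded by a define rather than the magic number 3.
 */
#include <stdint.h>
#include <stdio.h>

/* Currently only 3 fixed counters are supported (mirrors the new define). */
#define VPMU_CORE2_NUM_FIXED 3

struct pmumsr {
    unsigned int num;
    const uint32_t *msr;
};

/* 0x309..0x30b are the architectural IA32_FIXED_CTR0..2 MSR addresses,
 * used here in place of the MSR_CORE_PERF_FIXED_CTR* symbols. */
static const uint32_t core2_fix_counters_msr[VPMU_CORE2_NUM_FIXED] = {
    0x309, 0x30a, 0x30b
};

static const struct pmumsr core2_fix_counters = {
    VPMU_CORE2_NUM_FIXED,       /* previously the bare literal 3 */
    core2_fix_counters_msr
};

int main(void)
{
    /* Stand-in for the fix_counters[] array in the vPMU context. */
    uint64_t fix_counters[VPMU_CORE2_NUM_FIXED] = { 0 };

    /* Loop bound comes from the named table, not a hard-coded 3. */
    for (unsigned int i = 0; i < core2_fix_counters.num; i++)
        printf("fixed counter %u -> MSR 0x%x, saved value %llu\n",
               i, core2_fix_counters.msr[i],
               (unsigned long long)fix_counters[i]);

    return 0;
}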
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index 1c6a87c3b9..ceae50bc4f 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -101,7 +101,7 @@ static void handle_pmc_quirk(u64 msr_content)
}
}
-static const u32 core2_counters_msr[] = {
+static const u32 core2_fix_counters_msr[] = {
MSR_CORE_PERF_FIXED_CTR0,
MSR_CORE_PERF_FIXED_CTR1,
MSR_CORE_PERF_FIXED_CTR2
@@ -119,13 +119,13 @@ struct pmumsr {
const u32 *msr;
};
-static const struct pmumsr core2_counters = {
- 3,
- core2_counters_msr
+static const struct pmumsr core2_fix_counters = {
+ VPMU_CORE2_NUM_FIXED,
+ core2_fix_counters_msr
};
static const struct pmumsr core2_ctrls = {
- 3,
+ VPMU_CORE2_NUM_CTRLS,
core2_ctrls_msr
};
static int arch_pmc_cnt;
@@ -162,16 +162,16 @@ static int is_core2_vpmu_msr(u32 msr_index, int *type, int *index)
{
int i;
- for ( i = 0; i < core2_counters.num; i++ )
+ for ( i = 0; i < core2_fix_counters.num; i++ )
{
- if ( core2_counters.msr[i] == msr_index )
+ if ( core2_fix_counters.msr[i] == msr_index )
{
*type = MSR_TYPE_COUNTER;
*index = i;
return 1;
}
}
-
+
for ( i = 0; i < core2_ctrls.num; i++ )
{
if ( core2_ctrls.msr[i] == msr_index )
@@ -214,10 +214,10 @@ static void core2_vpmu_set_msr_bitmap(unsigned long *msr_bitmap)
int i;
/* Allow Read/Write PMU Counters MSR Directly. */
- for ( i = 0; i < core2_counters.num; i++ )
+ for ( i = 0; i < core2_fix_counters.num; i++ )
{
- clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap);
- clear_bit(msraddr_to_bitpos(core2_counters.msr[i]),
+ clear_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), msr_bitmap);
+ clear_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]),
msr_bitmap + 0x800/BYTES_PER_LONG);
}
for ( i = 0; i < core2_get_pmc_count(); i++ )
@@ -238,10 +238,10 @@ static void core2_vpmu_unset_msr_bitmap(unsigned long *msr_bitmap)
{
int i;
- for ( i = 0; i < core2_counters.num; i++ )
+ for ( i = 0; i < core2_fix_counters.num; i++ )
{
- set_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap);
- set_bit(msraddr_to_bitpos(core2_counters.msr[i]),
+ set_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), msr_bitmap);
+ set_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]),
msr_bitmap + 0x800/BYTES_PER_LONG);
}
for ( i = 0; i < core2_get_pmc_count(); i++ )
@@ -261,8 +261,8 @@ static inline void __core2_vpmu_save(struct vcpu *v)
int i;
struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context;
- for ( i = 0; i < core2_counters.num; i++ )
- rdmsrl(core2_counters.msr[i], core2_vpmu_cxt->counters[i]);
+ for ( i = 0; i < core2_fix_counters.num; i++ )
+ rdmsrl(core2_fix_counters.msr[i], core2_vpmu_cxt->fix_counters[i]);
for ( i = 0; i < core2_get_pmc_count(); i++ )
rdmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter);
core2_vpmu_cxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
@@ -292,8 +292,8 @@ static inline void __core2_vpmu_load(struct vcpu *v)
int i;
struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context;
- for ( i = 0; i < core2_counters.num; i++ )
- wrmsrl(core2_counters.msr[i], core2_vpmu_cxt->counters[i]);
+ for ( i = 0; i < core2_fix_counters.num; i++ )
+ wrmsrl(core2_fix_counters.msr[i], core2_vpmu_cxt->fix_counters[i]);
for ( i = 0; i < core2_get_pmc_count(); i++ )
wrmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter);
@@ -474,7 +474,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
rdmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, non_global_ctrl);
global_ctrl = msr_content >> 32;
- for ( i = 0; i < 3; i++ )
+ for ( i = 0; i < core2_fix_counters.num; i++ )
{
core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] =
(global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0);
@@ -486,7 +486,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
non_global_ctrl = msr_content;
vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
global_ctrl >>= 32;
- for ( i = 0; i < 3; i++ )
+ for ( i = 0; i < core2_fix_counters.num; i++ )
{
core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] =
(global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0);
@@ -502,7 +502,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
(global_ctrl >> tmp) & (msr_content >> 22) & 1;
}
- for ( i = 0; i < 3; i++ )
+ for ( i = 0; i < core2_fix_counters.num; i++ )
pmu_enable |= core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i];
for ( i = 0; i < core2_get_pmc_count(); i++ )
pmu_enable |= core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i];
diff --git a/xen/include/asm-x86/hvm/vmx/vpmu_core2.h b/xen/include/asm-x86/hvm/vmx/vpmu_core2.h
index 2a9f346a15..4128f2ac24 100644
--- a/xen/include/asm-x86/hvm/vmx/vpmu_core2.h
+++ b/xen/include/asm-x86/hvm/vmx/vpmu_core2.h
@@ -23,6 +23,11 @@
#ifndef __ASM_X86_HVM_VPMU_CORE_H_
#define __ASM_X86_HVM_VPMU_CORE_H_
+/* Currently only 3 fixed counters are supported. */
+#define VPMU_CORE2_NUM_FIXED 3
+/* Currently only 3 Non-architectural Performance Control MSRs */
+#define VPMU_CORE2_NUM_CTRLS 3
+
struct arch_msr_pair {
u64 counter;
u64 control;
@@ -30,14 +35,14 @@ struct arch_msr_pair {
struct core2_pmu_enable {
char ds_area_enable;
- char fixed_ctr_enable[3];
+ char fixed_ctr_enable[VPMU_CORE2_NUM_FIXED];
char arch_pmc_enable[1];
};
struct core2_vpmu_context {
struct core2_pmu_enable *pmu_enable;
- u64 counters[3];
- u64 ctrls[3];
+ u64 fix_counters[VPMU_CORE2_NUM_FIXED];
+ u64 ctrls[VPMU_CORE2_NUM_CTRLS];
u64 global_ovf_status;
u32 hw_lapic_lvtpc;
struct arch_msr_pair arch_msr_pair[1];