diff options
author | Dietmar Hahn <dietmar.hahn@ts.fujitsu.com> | 2013-03-26 14:24:25 +0100 |
---|---|---|
committer | Jan Beulich <jbeulich@suse.com> | 2013-03-26 14:24:25 +0100 |
commit | 25250ed7c8094a905c5f03b8ae25c8694d3fa9b3 (patch) | |
tree | dadd1ea3edda3aa7e14b5113f09dd7890b8fd005 /xen/arch/x86/hvm/vpmu.c | |
parent | db537fe3023bf157b85c8246782cb72a6f989b31 (diff) | |
download | xen-25250ed7c8094a905c5f03b8ae25c8694d3fa9b3.tar.gz xen-25250ed7c8094a905c5f03b8ae25c8694d3fa9b3.tar.bz2 xen-25250ed7c8094a905c5f03b8ae25c8694d3fa9b3.zip |
vpmu intel: Add cpuid handling when vpmu disabled
Even though vpmu is disabled in the hypervisor, inside an HVM guest the
cpuid(0xa) call still returns information about usable performance counters.
This may confuse guest software when it tries to use the counters and
nothing happens.
This patch clears most bits in registers eax and edx of the cpuid(0xa)
instruction for the guest when vpmu is disabled:
- version ID of architectural performance counting
- number of general pmu registers
- width of general pmu registers
- number of fixed pmu registers
- width of fixed pmu registers
Signed-off-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
Acked-by: Keir Fraser <keir@xen.org>
Diffstat (limited to 'xen/arch/x86/hvm/vpmu.c')
-rw-r--r-- | xen/arch/x86/hvm/vpmu.c | 15 |
1 files changed, 6 insertions, 9 deletions
diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c index e9a881bdce..0b843c15e5 100644 --- a/xen/arch/x86/hvm/vpmu.c +++ b/xen/arch/x86/hvm/vpmu.c @@ -67,7 +67,7 @@ int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content) { struct vpmu_struct *vpmu = vcpu_vpmu(current); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr ) return vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content); return 0; } @@ -76,7 +76,7 @@ int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content) { struct vpmu_struct *vpmu = vcpu_vpmu(current); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr ) return vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content); return 0; } @@ -85,7 +85,7 @@ int vpmu_do_interrupt(struct cpu_user_regs *regs) { struct vpmu_struct *vpmu = vcpu_vpmu(current); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_interrupt ) return vpmu->arch_vpmu_ops->do_interrupt(regs); return 0; } @@ -104,7 +104,7 @@ void vpmu_save(struct vcpu *v) { struct vpmu_struct *vpmu = vcpu_vpmu(v); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_save ) vpmu->arch_vpmu_ops->arch_vpmu_save(v); } @@ -112,7 +112,7 @@ void vpmu_load(struct vcpu *v) { struct vpmu_struct *vpmu = vcpu_vpmu(v); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load ) vpmu->arch_vpmu_ops->arch_vpmu_load(v); } @@ -121,9 +121,6 @@ void vpmu_initialise(struct vcpu *v) struct vpmu_struct *vpmu = vcpu_vpmu(v); uint8_t vendor = current_cpu_data.x86_vendor; - if ( !opt_vpmu_enabled ) - return; - if ( vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) ) vpmu_destroy(v); vpmu_clear(vpmu); @@ -153,7 +150,7 @@ void vpmu_destroy(struct vcpu *v) { struct vpmu_struct *vpmu = vcpu_vpmu(v); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_destroy ) vpmu->arch_vpmu_ops->arch_vpmu_destroy(v); } |