author    Boris Ostrovsky <boris.ostrovsky@oracle.com>  2013-04-15 11:25:18 +0200
committer Jan Beulich <jbeulich@suse.com>                2013-04-15 11:25:18 +0200
commit    5fb2decd2d3a821a7e9cff8c53220eaa45557d8f
tree      f406281d4d878edb9bb150a32ce01ddac577a4f0 /xen/arch/x86/hvm/svm
parent    176706746dee6ad72f6bf15d2b6093973c652d6b
x86/AMD: Stop counters on VPMU save
Stop the counters during the VPMU save operation, since they should not be running while the vCPU that controls them is not. This also makes it unnecessary to check for overflow in context_restore().

Set the LVTPC vector before loading the context during vpmu_restore(); otherwise it is possible to trigger an interrupt without the proper vector.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
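The hunks below show the change in diff form. As a self-contained illustration of the resulting ordering, here is a minimal sketch in plain C. It assumes Xen-style rdmsrl()/wrmsrl()/apic_write() accessors and the legacy four-counter AMD (K7) PMU MSR layout; the names pmu_ctxt, pmu_save(), pmu_restore(), NUM_COUNTERS, ctrl_msr[] and counter_msr[] are illustrative, not the identifiers used in xen/arch/x86/hvm/svm/vpmu.c.

    /*
     * Illustrative sketch of the save/restore ordering this patch establishes.
     * Assumes Xen-style rdmsrl()/wrmsrl()/apic_write() and the legacy AMD (K7)
     * four-counter PMU MSRs; the names below are hypothetical, not Xen's.
     */
    #define NUM_COUNTERS 4

    static const unsigned int ctrl_msr[NUM_COUNTERS] = {
        0xc0010000, 0xc0010001, 0xc0010002, 0xc0010003   /* PERF_CTL0..3 */
    };
    static const unsigned int counter_msr[NUM_COUNTERS] = {
        0xc0010004, 0xc0010005, 0xc0010006, 0xc0010007   /* PERF_CTR0..3 */
    };

    struct pmu_ctxt {
        uint64_t ctrls[NUM_COUNTERS];
        uint64_t counters[NUM_COUNTERS];
        uint32_t hw_lapic_lvtpc;
    };

    /* Save: stop each counter (clear its control MSR) before reading its
     * value, so nothing keeps counting while the owning vCPU is descheduled. */
    static void pmu_save(struct pmu_ctxt *ctxt)
    {
        unsigned int i;

        for ( i = 0; i < NUM_COUNTERS; i++ )
        {
            rdmsrl(ctrl_msr[i], ctxt->ctrls[i]);
            wrmsrl(ctrl_msr[i], 0);                      /* stop the counter */
            rdmsrl(counter_msr[i], ctxt->counters[i]);
        }
    }

    /* Restore: program the LVTPC vector first, then reload each counter value
     * before re-enabling its control, so no interrupt can fire with a stale
     * vector or against a not-yet-restored counter. */
    static void pmu_restore(const struct pmu_ctxt *ctxt)
    {
        unsigned int i;

        apic_write(APIC_LVTPC, ctxt->hw_lapic_lvtpc);

        for ( i = 0; i < NUM_COUNTERS; i++ )
        {
            wrmsrl(counter_msr[i], ctxt->counters[i]);
            wrmsrl(ctrl_msr[i], ctxt->ctrls[i]);
        }
    }

Stopping the counters in the save path is what removes the need for the overflow check that the first hunk deletes from context_restore().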
Diffstat (limited to 'xen/arch/x86/hvm/svm')
-rw-r--r--   xen/arch/x86/hvm/svm/vpmu.c   22
1 file changed, 6 insertions(+), 16 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index 4be56524db..51e5495bc6 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -197,20 +197,9 @@ static inline void context_restore(struct vcpu *v)
struct amd_vpmu_context *ctxt = vpmu->context;
for ( i = 0; i < num_counters; i++ )
- wrmsrl(ctrls[i], ctxt->ctrls[i]);
-
- for ( i = 0; i < num_counters; i++ )
{
wrmsrl(counters[i], ctxt->counters[i]);
-
- /* Force an interrupt to allow guest reset the counter,
- if the value is positive */
- if ( is_overflowed(ctxt->counters[i]) && (ctxt->counters[i] > 0) )
- {
- gdprintk(XENLOG_WARNING, "VPMU: Force a performance counter "
- "overflow interrupt!\n");
- amd_vpmu_do_interrupt(0);
- }
+ wrmsrl(ctrls[i], ctxt->ctrls[i]);
}
}
@@ -223,8 +212,8 @@ static void amd_vpmu_restore(struct vcpu *v)
vpmu_is_set(vpmu, VPMU_RUNNING)) )
return;
- context_restore(v);
apic_write(APIC_LVTPC, ctxt->hw_lapic_lvtpc);
+ context_restore(v);
vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
}
@@ -236,10 +225,11 @@ static inline void context_save(struct vcpu *v)
struct amd_vpmu_context *ctxt = vpmu->context;
for ( i = 0; i < num_counters; i++ )
- rdmsrl(counters[i], ctxt->counters[i]);
-
- for ( i = 0; i < num_counters; i++ )
+ {
rdmsrl(ctrls[i], ctxt->ctrls[i]);
+ wrmsrl(ctrls[i], 0);
+ rdmsrl(counters[i], ctxt->counters[i]);
+ }
}
static void amd_vpmu_save(struct vcpu *v)