author     Boris Ostrovsky <boris.ostrovsky@oracle.com>  2013-04-15 11:24:52 +0200
committer  Jan Beulich <jbeulich@suse.com>               2013-04-15 11:24:52 +0200
commit     176706746dee6ad72f6bf15d2b6093973c652d6b (patch)
tree       23874aec42dbaf438b4fa1dc9ce8dc3d9c21242a /xen/arch/x86/hvm/svm
parent     45773c5fb6346b1bc2a2ddcc6d19bd7f53ccabff (diff)
x86/AMD: Load context when attempting to read VPMU MSRs
Load context (and mark it as LOADED) on any MSR access. This will allow us to
always read the most up-to-date value of an MSR: a guest may write into an MSR
without enabling it (thus not marking the context as RUNNING) and then be
migrated. Without first loading the context, reading this MSR from HW will not
match the previous write, since registers will not be loaded into HW in
amd_vpmu_load().

In addition, we should be saving the context when it is LOADED, not RUNNING ---
otherwise we need to save it any time it becomes non-RUNNING, which may be a
frequent occurrence.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
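The change applies the same lazy-load pattern in both MSR paths: if the vCPU's
PMU context is not currently LOADED into hardware, restore it (and set the
flag) before touching the MSR; the save path then only runs while the flag is
set and clears it afterwards. The standalone C sketch below models just that
flag handling; the struct, the ctx_restore()/ctx_save() helpers and the counter
fields are simplified stand-ins for illustration, not the actual Xen interfaces.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the real Xen vpmu state; VPMU_CONTEXT_LOADED
 * mirrors the flag used by the patch. */
#define VPMU_CONTEXT_LOADED 0x1

struct vpmu {
    unsigned int flags;
    uint64_t saved_counter;   /* software copy of one PMU MSR */
    uint64_t hw_counter;      /* models the physical MSR */
};

static void ctx_restore(struct vpmu *v)   /* plays the role of context_restore() */
{
    v->hw_counter = v->saved_counter;
}

static void ctx_save(struct vpmu *v)      /* plays the role of context_save() */
{
    v->saved_counter = v->hw_counter;
}

/* Load lazily on any MSR read, as the patch now does in the rdmsr path. */
static uint64_t vpmu_rdmsr(struct vpmu *v)
{
    if ( !(v->flags & VPMU_CONTEXT_LOADED) )
    {
        ctx_restore(v);
        v->flags |= VPMU_CONTEXT_LOADED;
    }
    return v->hw_counter;
}

/* Save only when the context is actually LOADED, then clear the flag. */
static void vpmu_save(struct vpmu *v)
{
    if ( !(v->flags & VPMU_CONTEXT_LOADED) )
        return;
    ctx_save(v);
    v->flags &= ~VPMU_CONTEXT_LOADED;
}

int main(void)
{
    /* A value was written to the software context but never loaded into HW. */
    struct vpmu v = { .flags = 0, .saved_counter = 42, .hw_counter = 0 };

    /* Without the lazy restore this read would return the stale HW value (0). */
    printf("counter = %llu\n", (unsigned long long)vpmu_rdmsr(&v));
    vpmu_save(&v);
    return 0;
}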
Diffstat (limited to 'xen/arch/x86/hvm/svm')
-rw-r--r--  xen/arch/x86/hvm/svm/vpmu.c  21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index 1e54497c43..4be56524db 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -225,6 +225,8 @@ static void amd_vpmu_restore(struct vcpu *v)
 
     context_restore(v);
     apic_write(APIC_LVTPC, ctxt->hw_lapic_lvtpc);
+
+    vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
 }
 
 static inline void context_save(struct vcpu *v)
@@ -246,7 +248,7 @@ static void amd_vpmu_save(struct vcpu *v)
     struct amd_vpmu_context *ctx = vpmu->context;
 
     if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
-           vpmu_is_set(vpmu, VPMU_RUNNING)) )
+           vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
         return;
 
     context_save(v);
@@ -256,6 +258,7 @@ static void amd_vpmu_save(struct vcpu *v)
 
     ctx->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
     apic_write(APIC_LVTPC, ctx->hw_lapic_lvtpc | APIC_LVT_MASKED);
+    vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
 }
 
 static void context_update(unsigned int msr, u64 msr_content)
@@ -318,6 +321,12 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         release_pmu_ownship(PMU_OWNER_HVM);
     }
 
+    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
+    {
+        context_restore(v);
+        vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
+    }
+
     /* Update vpmu context immediately */
     context_update(msr, msr_content);
@@ -328,7 +337,17 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
 
 static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 {
+    struct vcpu *v = current;
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+
+    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
+    {
+        context_restore(v);
+        vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
+    }
+
     rdmsrl(msr, *msr_content);
+
     return 1;
 }