about summary refs log tree commit diff stats
path: root/xen/arch/x86/hvm/svm
diff options
context:
space:
mode:
authorBoris Ostrovsky <boris.ostrovsky@oracle.com>2013-04-15 11:28:08 +0200
committerJan Beulich <jbeulich@suse.com>2013-04-15 11:28:08 +0200
commit1df002bb2f218baee6fa4116fa8a7994a49daa91 (patch)
tree2f460c2cfd165a9bb1e81f854d875ec7d4eeb67d /xen/arch/x86/hvm/svm
parent055898c9c61d462888941eaede436db4d335890e (diff)
downloadxen-1df002bb2f218baee6fa4116fa8a7994a49daa91.tar.gz
xen-1df002bb2f218baee6fa4116fa8a7994a49daa91.tar.bz2
xen-1df002bb2f218baee6fa4116fa8a7994a49daa91.zip
x86/AMD: Clean up context_update() in AMD VPMU code
Clean up context_update() in AMD VPMU code. Rename the restore routine to "load" to be consistent with the Intel code and with the arch_vpmu_ops names. Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> Reviewed-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
Diffstat (limited to 'xen/arch/x86/hvm/svm')
-rw-r--r--xen/arch/x86/hvm/svm/vpmu.c28
1 file changed, 17 insertions, 11 deletions
diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index efe2b7cfd4..b36ab2b1a9 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -173,7 +173,7 @@ static int amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
return 1;
}
-static inline void context_restore(struct vcpu *v)
+static inline void context_load(struct vcpu *v)
{
unsigned int i;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
@@ -186,7 +186,7 @@ static inline void context_restore(struct vcpu *v)
}
}
-static void amd_vpmu_restore(struct vcpu *v)
+static void amd_vpmu_load(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
struct amd_vpmu_context *ctxt = vpmu->context;
@@ -203,7 +203,7 @@ static void amd_vpmu_restore(struct vcpu *v)
return;
}
- context_restore(v);
+ context_load(v);
}
static inline void context_save(struct vcpu *v)
@@ -262,12 +262,18 @@ static void context_update(unsigned int msr, u64 msr_content)
}
for ( i = 0; i < num_counters; i++ )
- if ( msr == counters[i] )
+ {
+ if ( msr == ctrls[i] )
+ {
+ ctxt->ctrls[i] = msr_content;
+ return;
+ }
+ else if (msr == counters[i] )
+ {
ctxt->counters[i] = msr_content;
-
- for ( i = 0; i < num_counters; i++ )
- if ( msr == ctrls[i] )
- ctxt->ctrls[i] = msr_content;
+ return;
+ }
+ }
}
static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
@@ -311,7 +317,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)
|| vpmu_is_set(vpmu, VPMU_FROZEN) )
{
- context_restore(v);
+ context_load(v);
vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
vpmu_reset(vpmu, VPMU_FROZEN);
}
@@ -332,7 +338,7 @@ static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)
|| vpmu_is_set(vpmu, VPMU_FROZEN) )
{
- context_restore(v);
+ context_load(v);
vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
vpmu_reset(vpmu, VPMU_FROZEN);
}
@@ -414,7 +420,7 @@ struct arch_vpmu_ops amd_vpmu_ops = {
.do_interrupt = amd_vpmu_do_interrupt,
.arch_vpmu_destroy = amd_vpmu_destroy,
.arch_vpmu_save = amd_vpmu_save,
- .arch_vpmu_load = amd_vpmu_restore
+ .arch_vpmu_load = amd_vpmu_load
};
int svm_vpmu_initialise(struct vcpu *v, unsigned int vpmu_flags)