diff options
author | Christoph Egger <Christoph.Egger@amd.com> | 2012-09-26 12:07:42 +0200 |
---|---|---|
committer | Christoph Egger <Christoph.Egger@amd.com> | 2012-09-26 12:07:42 +0200 |
commit | be3e4ed45e1bb0ec9aa2dfcd5450eec6ccc56dac (patch) | |
tree | 36a4c66187f472579b1db2029022f415d9c095fc | |
parent | 19b03acdd1cdb8a6c35a30079ae6d6fb72caa285 (diff) | |
download | xen-be3e4ed45e1bb0ec9aa2dfcd5450eec6ccc56dac.tar.gz xen-be3e4ed45e1bb0ec9aa2dfcd5450eec6ccc56dac.tar.bz2 xen-be3e4ed45e1bb0ec9aa2dfcd5450eec6ccc56dac.zip |
x86/vMCE: Add AMD support
Add vMCE support for AMD. Add vmce namespace to Intel-specific vMCE MSR
functions. Move vMCE prototypes from mce.h to vmce.h.
Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
- fix inverted return values from vmce_amd_{rd,wr}msr()
- remove bogus printk()-s from those functions
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Committed-by: Jan Beulich <jbeulich@suse.com>
-rw-r--r-- | xen/arch/x86/cpu/mcheck/amd_f10.c | 25 | ||||
-rw-r--r-- | xen/arch/x86/cpu/mcheck/amd_nonfatal.c | 1 | ||||
-rw-r--r-- | xen/arch/x86/cpu/mcheck/mce.c | 1 | ||||
-rw-r--r-- | xen/arch/x86/cpu/mcheck/mce.h | 15 | ||||
-rw-r--r-- | xen/arch/x86/cpu/mcheck/mce_intel.c | 5 | ||||
-rw-r--r-- | xen/arch/x86/cpu/mcheck/non-fatal.c | 1 | ||||
-rw-r--r-- | xen/arch/x86/cpu/mcheck/vmce.c | 16 | ||||
-rw-r--r-- | xen/arch/x86/cpu/mcheck/vmce.h | 23 |
8 files changed, 67 insertions, 20 deletions
diff --git a/xen/arch/x86/cpu/mcheck/amd_f10.c b/xen/arch/x86/cpu/mcheck/amd_f10.c index d73d5ba47b..3c807f585d 100644 --- a/xen/arch/x86/cpu/mcheck/amd_f10.c +++ b/xen/arch/x86/cpu/mcheck/amd_f10.c @@ -104,3 +104,28 @@ enum mcheck_type amd_f10_mcheck_init(struct cpuinfo_x86 *c) return mcheck_amd_famXX; } + +/* amd specific MCA MSR */ +int vmce_amd_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) +{ + switch (msr) { + case MSR_F10_MC4_MISC1: + case MSR_F10_MC4_MISC2: + case MSR_F10_MC4_MISC3: + break; + } + + return 1; +} + +int vmce_amd_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) +{ + switch (msr) { + case MSR_F10_MC4_MISC1: + case MSR_F10_MC4_MISC2: + case MSR_F10_MC4_MISC3: + break; + } + + return 1; +} diff --git a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c index 92220981a2..98a0f8d933 100644 --- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c +++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c @@ -64,6 +64,7 @@ #include <asm/msr.h> #include "mce.h" +#include "vmce.h" static struct timer mce_timer; diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c index 5e32e9b250..5f03394699 100644 --- a/xen/arch/x86/cpu/mcheck/mce.c +++ b/xen/arch/x86/cpu/mcheck/mce.c @@ -25,6 +25,7 @@ #include "mce.h" #include "barrier.h" #include "util.h" +#include "vmce.h" bool_t __read_mostly mce_disabled; invbool_param("mce", mce_disabled); diff --git a/xen/arch/x86/cpu/mcheck/mce.h b/xen/arch/x86/cpu/mcheck/mce.h index 5b33934851..64e1c40123 100644 --- a/xen/arch/x86/cpu/mcheck/mce.h +++ b/xen/arch/x86/cpu/mcheck/mce.h @@ -49,15 +49,9 @@ void intel_mcheck_timer(struct cpuinfo_x86 *c); void mce_intel_feature_init(struct cpuinfo_x86 *c); void amd_nonfatal_mcheck_init(struct cpuinfo_x86 *c); -int is_vmce_ready(struct mcinfo_bank *bank, struct domain *d); -int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn); - -u64 mce_cap_init(void); +uint64_t mce_cap_init(void); extern unsigned int firstbank; -int 
intel_mce_rdmsr(const struct vcpu *, uint32_t msr, uint64_t *val); -int intel_mce_wrmsr(struct vcpu *, uint32_t msr, uint64_t val); - struct mcinfo_extended *intel_get_extended_msrs( struct mcinfo_global *mig, struct mc_info *mi); @@ -69,9 +63,6 @@ void mc_panic(char *s); void x86_mc_get_cpu_info(unsigned, uint32_t *, uint16_t *, uint16_t *, uint32_t *, uint32_t *, uint32_t *, uint32_t *); -#define dom0_vmce_enabled() (dom0 && dom0->max_vcpus && dom0->vcpu[0] \ - && guest_enabled_event(dom0->vcpu[0], VIRQ_MCA)) - /* Register a handler for machine check exceptions. */ typedef void (*x86_mce_vector_t)(struct cpu_user_regs *, long); extern void x86_mce_vector_register(x86_mce_vector_t); @@ -166,10 +157,6 @@ void *x86_mcinfo_add(struct mc_info *mi, void *mcinfo); void *x86_mcinfo_reserve(struct mc_info *mi, int size); void x86_mcinfo_dump(struct mc_info *mi); -int fill_vmsr_data(struct mcinfo_bank *mc_bank, struct domain *d, - uint64_t gstatus); -int inject_vmce(struct domain *d, int vcpu); - static inline int mce_vendor_bank_msr(const struct vcpu *v, uint32_t msr) { switch (boot_cpu_data.x86_vendor) { diff --git a/xen/arch/x86/cpu/mcheck/mce_intel.c b/xen/arch/x86/cpu/mcheck/mce_intel.c index 2f7709bd06..254cbc9100 100644 --- a/xen/arch/x86/cpu/mcheck/mce_intel.c +++ b/xen/arch/x86/cpu/mcheck/mce_intel.c @@ -18,6 +18,7 @@ #include "x86_mca.h" #include "barrier.h" #include "util.h" +#include "vmce.h" DEFINE_PER_CPU(struct mca_banks *, mce_banks_owned); DEFINE_PER_CPU(struct mca_banks *, no_cmci_banks); @@ -980,7 +981,7 @@ enum mcheck_type intel_mcheck_init(struct cpuinfo_x86 *c, bool_t bsp) } /* intel specific MCA MSR */ -int intel_mce_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) +int vmce_intel_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) { unsigned int bank = msr - MSR_IA32_MC0_CTL2; @@ -993,7 +994,7 @@ int intel_mce_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) return 1; } -int intel_mce_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) +int 
vmce_intel_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) { unsigned int bank = msr - MSR_IA32_MC0_CTL2; diff --git a/xen/arch/x86/cpu/mcheck/non-fatal.c b/xen/arch/x86/cpu/mcheck/non-fatal.c index 1dded9b06b..16fbae6827 100644 --- a/xen/arch/x86/cpu/mcheck/non-fatal.c +++ b/xen/arch/x86/cpu/mcheck/non-fatal.c @@ -21,6 +21,7 @@ #include <asm/msr.h> #include "mce.h" +#include "vmce.h" DEFINE_PER_CPU(struct mca_banks *, poll_bankmask); static struct timer mce_timer; diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c index cc1d48f524..a486af5c9b 100644 --- a/xen/arch/x86/cpu/mcheck/vmce.c +++ b/xen/arch/x86/cpu/mcheck/vmce.c @@ -33,8 +33,10 @@ #include <asm/system.h> #include <asm/msr.h> #include <asm/p2m.h> + #include "mce.h" #include "x86_mca.h" +#include "vmce.h" /* * MCG_SER_P: software error recovery supported @@ -143,7 +145,10 @@ static int bank_mce_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) switch ( boot_cpu_data.x86_vendor ) { case X86_VENDOR_INTEL: - ret = intel_mce_rdmsr(v, msr, val); + ret = vmce_intel_rdmsr(v, msr, val); + break; + case X86_VENDOR_AMD: + ret = vmce_amd_rdmsr(v, msr, val); break; default: ret = 0; @@ -200,7 +205,7 @@ int vmce_rdmsr(uint32_t msr, uint64_t *val) * For historic version reason, bank number may greater than GUEST_MC_BANK_NUM, * when migratie from old vMCE version to new vMCE. 
*/ -static int bank_mce_wrmsr(struct vcpu *v, u32 msr, u64 val) +static int bank_mce_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) { int ret = 1; unsigned int bank = (msr - MSR_IA32_MC0_CTL) / 4; @@ -238,7 +243,10 @@ static int bank_mce_wrmsr(struct vcpu *v, u32 msr, u64 val) switch ( boot_cpu_data.x86_vendor ) { case X86_VENDOR_INTEL: - ret = intel_mce_wrmsr(v, msr, val); + ret = vmce_intel_wrmsr(v, msr, val); + break; + case X86_VENDOR_AMD: + ret = vmce_amd_wrmsr(v, msr, val); break; default: ret = 0; @@ -255,7 +263,7 @@ static int bank_mce_wrmsr(struct vcpu *v, u32 msr, u64 val) * = 0: Not handled, should be handled by other components * > 0: Success */ -int vmce_wrmsr(u32 msr, u64 val) +int vmce_wrmsr(uint32_t msr, uint64_t val) { struct vcpu *cur = current; int ret = 1; diff --git a/xen/arch/x86/cpu/mcheck/vmce.h b/xen/arch/x86/cpu/mcheck/vmce.h new file mode 100644 index 0000000000..a83db4ac04 --- /dev/null +++ b/xen/arch/x86/cpu/mcheck/vmce.h @@ -0,0 +1,23 @@ +#ifndef _MCHECK_VMCE_H +#define _MCHECK_VMCE_H + +#include "x86_mca.h" + +int vmce_init(struct cpuinfo_x86 *c); + +#define dom0_vmce_enabled() (dom0 && dom0->max_vcpus && dom0->vcpu[0] \ + && guest_enabled_event(dom0->vcpu[0], VIRQ_MCA)) + +int is_vmce_ready(struct mcinfo_bank *bank, struct domain *d); +int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn); + +int vmce_intel_rdmsr(const struct vcpu *, uint32_t msr, uint64_t *val); +int vmce_intel_wrmsr(struct vcpu *, uint32_t msr, uint64_t val); +int vmce_amd_rdmsr(const struct vcpu *, uint32_t msr, uint64_t *val); +int vmce_amd_wrmsr(struct vcpu *, uint32_t msr, uint64_t val); + +int fill_vmsr_data(struct mcinfo_bank *mc_bank, struct domain *d, + uint64_t gstatus); +int inject_vmce(struct domain *d, int vcpu); + +#endif |