author     Christoph Egger <Christoph.Egger@amd.com>    2012-10-25 14:28:09 +0200
committer  Christoph Egger <Christoph.Egger@amd.com>    2012-10-25 14:28:09 +0200
commit     78c579426fb565e5eb446ab653176fe7f2f7c4c4 (patch)
tree       afcafd0c516a805df4ab083c0d0e9a521787d816 /xen/arch/x86/cpu
parent     2392dcbcdeb0b038faa523c0f57735c14aa2d60b (diff)
x86/MCE: Implement clearbank callback for AMD
Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>

Move initialization of mce_clear_banks into common code (would not get
initialized on AMD CPUs otherwise). Mark per-CPU struct mce_bank pointers
read-mostly.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Committed-by: Jan Beulich <jbeulich@suse.com>
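
To make the new hook concrete, here is a small standalone sketch (plain C, not
Xen code) of the decision this patch wires up: a vendor predicate tells the
common bank scan whether MCi_STATUS may be cleared, and the AMD version
declines for fatal errors (UC and PCC both set) seen during an MCE scan, so
the sticky bank survives a reboot and can still be reported by the poller.
The enum values and status-bit macros are simplified stand-ins; only the
predicate's logic mirrors the hunk in amd_k8.c below.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the MCi_STATUS bits the predicate looks at. */
#define MCi_STATUS_UC  (1ULL << 61)   /* uncorrected error */
#define MCi_STATUS_PCC (1ULL << 57)   /* processor context corrupt */

enum mca_source { MCA_POLLER, MCA_MCE_SCAN };

/* Return nonzero if the bank's status register may be cleared. */
static int k8_need_clearbank_scan(enum mca_source who, uint64_t status)
{
    if (who != MCA_MCE_SCAN)
        return 1;

    /* Leave fatal errors sticky so post-reboot polling can still see them. */
    if ((status & MCi_STATUS_UC) && (status & MCi_STATUS_PCC))
        return 0;

    return 1;
}

int main(void)
{
    uint64_t fatal = MCi_STATUS_UC | MCi_STATUS_PCC;

    /* MCE scan of a fatal bank: keep it sticky (prints 0). */
    printf("MCE scan, UC+PCC -> clear? %d\n",
           k8_need_clearbank_scan(MCA_MCE_SCAN, fatal));
    /* Poller path: clearing is allowed (prints 1). */
    printf("poller,   UC+PCC -> clear? %d\n",
           k8_need_clearbank_scan(MCA_POLLER, fatal));
    return 0;
}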
Diffstat (limited to 'xen/arch/x86/cpu')
-rw-r--r--  xen/arch/x86/cpu/mcheck/amd_k8.c     19
-rw-r--r--  xen/arch/x86/cpu/mcheck/mce.c        43
-rw-r--r--  xen/arch/x86/cpu/mcheck/mce.h         1
-rw-r--r--  xen/arch/x86/cpu/mcheck/mce_intel.c  34
-rw-r--r--  xen/arch/x86/cpu/mcheck/mctelem.h     4
-rw-r--r--  xen/arch/x86/cpu/mcheck/non-fatal.c   1
6 files changed, 60 insertions(+), 42 deletions(-)
diff --git a/xen/arch/x86/cpu/mcheck/amd_k8.c b/xen/arch/x86/cpu/mcheck/amd_k8.c
index 6516a82be5..8ff359cb87 100644
--- a/xen/arch/x86/cpu/mcheck/amd_k8.c
+++ b/xen/arch/x86/cpu/mcheck/amd_k8.c
@@ -72,7 +72,23 @@
/* Machine Check Handler for AMD K8 family series */
static void k8_machine_check(struct cpu_user_regs *regs, long error_code)
{
- mcheck_cmn_handler(regs, error_code, mca_allbanks, NULL);
+ mcheck_cmn_handler(regs, error_code, mca_allbanks,
+ __get_cpu_var(mce_clear_banks));
+}
+
+static int k8_need_clearbank_scan(enum mca_source who, uint64_t status)
+{
+ if (who != MCA_MCE_SCAN)
+ return 1;
+
+ /*
+ * For a fatal error, the bank should not be cleared, so that the
+ * sticky bank has a chance to be handled after reboot by polling.
+ */
+ if ((status & MCi_STATUS_UC) && (status & MCi_STATUS_PCC))
+ return 0;
+
+ return 1;
}
/* AMD K8 machine check */
@@ -85,6 +101,7 @@ enum mcheck_type amd_k8_mcheck_init(struct cpuinfo_x86 *c)
mce_handler_init();
x86_mce_vector_register(k8_machine_check);
+ mce_need_clearbank_register(k8_need_clearbank_scan);
for (i = 0; i < nr_mce_banks; i++) {
if (quirkflag == MCEQUIRK_K8_GART && i == 4) {
diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index 75f0f73a1d..7d4743e8ed 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -35,6 +35,10 @@ bool_t is_mc_panic;
unsigned int __read_mostly nr_mce_banks;
unsigned int __read_mostly firstbank;
+DEFINE_PER_CPU_READ_MOSTLY(struct mca_banks *, poll_bankmask);
+DEFINE_PER_CPU_READ_MOSTLY(struct mca_banks *, no_cmci_banks);
+DEFINE_PER_CPU_READ_MOSTLY(struct mca_banks *, mce_clear_banks);
+
static void intpose_init(void);
static void mcinfo_clear(struct mc_info *);
struct mca_banks *mca_allbanks;
@@ -691,22 +695,29 @@ int mca_cap_init(void)
return mca_allbanks ? 0:-ENOMEM;
}
-static void cpu_poll_bankmask_free(unsigned int cpu)
+static void cpu_bank_free(unsigned int cpu)
{
- struct mca_banks *mb = per_cpu(poll_bankmask, cpu);
+ struct mca_banks *poll = per_cpu(poll_bankmask, cpu);
+ struct mca_banks *clr = per_cpu(mce_clear_banks, cpu);
- mcabanks_free(mb);
+ mcabanks_free(poll);
+ mcabanks_free(clr);
}
-static int cpu_poll_bankmask_alloc(unsigned int cpu)
+static int cpu_bank_alloc(unsigned int cpu)
{
- struct mca_banks *mb;
+ struct mca_banks *poll = mcabanks_alloc();
+ struct mca_banks *clr = mcabanks_alloc();
- mb = mcabanks_alloc();
- if ( !mb )
+ if ( !poll || !clr )
+ {
+ mcabanks_free(poll);
+ mcabanks_free(clr);
return -ENOMEM;
+ }
- per_cpu(poll_bankmask, cpu) = mb;
+ per_cpu(poll_bankmask, cpu) = poll;
+ per_cpu(mce_clear_banks, cpu) = clr;
return 0;
}
@@ -719,11 +730,11 @@ static int cpu_callback(
switch ( action )
{
case CPU_UP_PREPARE:
- rc = cpu_poll_bankmask_alloc(cpu);
+ rc = cpu_bank_alloc(cpu);
break;
case CPU_UP_CANCELED:
case CPU_DEAD:
- cpu_poll_bankmask_free(cpu);
+ cpu_bank_free(cpu);
break;
default:
break;
@@ -757,6 +768,10 @@ void mcheck_init(struct cpuinfo_x86 *c, bool_t bsp)
if (mca_cap_init())
return;
+ /* Early MCE initialisation for BSP. */
+ if ( bsp && cpu_bank_alloc(smp_processor_id()) )
+ BUG();
+
switch (c->x86_vendor) {
case X86_VENDOR_AMD:
inited = amd_mcheck_init(c);
@@ -787,18 +802,14 @@ void mcheck_init(struct cpuinfo_x86 *c, bool_t bsp)
set_in_cr4(X86_CR4_MCE);
if ( bsp )
- {
- /* Early MCE initialisation for BSP. */
- if ( cpu_poll_bankmask_alloc(0) )
- BUG();
register_cpu_notifier(&cpu_nfb);
- }
set_poll_bankmask(c);
return;
out:
- if (smp_processor_id() == 0)
+ if ( bsp )
{
+ cpu_bank_free(smp_processor_id());
mcabanks_free(mca_allbanks);
mca_allbanks = NULL;
}
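
A side note on the mce.c hunk above: the error path in cpu_bank_alloc() frees
both pointers unconditionally when either allocation fails, which relies on
mcabanks_free() accepting a NULL argument (the pre-existing Intel error path
already made the same assumption). The standalone sketch below (simplified
types and names, not the Xen definitions) spells out that paired
allocate-or-roll-back pattern.

#include <stdlib.h>

/* Simplified stand-in for struct mca_banks. */
struct mask { unsigned long *bits; };

static struct mask *mask_alloc(void)
{
    return calloc(1, sizeof(struct mask));
}

/* NULL-tolerant free, so error paths may release both pointers blindly. */
static void mask_free(struct mask *m)
{
    if (!m)
        return;
    free(m->bits);
    free(m);
}

/* Allocate both masks or neither, mirroring the shape of cpu_bank_alloc(). */
static int masks_alloc(struct mask **poll, struct mask **clr)
{
    struct mask *p = mask_alloc();
    struct mask *c = mask_alloc();

    if (!p || !c) {
        mask_free(p);   /* either pointer may be NULL here */
        mask_free(c);
        return -1;      /* -ENOMEM in the real code */
    }

    *poll = p;
    *clr = c;
    return 0;
}

int main(void)
{
    struct mask *poll = NULL, *clr = NULL;

    if (masks_alloc(&poll, &clr) == 0) {
        mask_free(poll);
        mask_free(clr);
    }
    return 0;
}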
diff --git a/xen/arch/x86/cpu/mcheck/mce.h b/xen/arch/x86/cpu/mcheck/mce.h
index 11dae80b4c..21f65d2384 100644
--- a/xen/arch/x86/cpu/mcheck/mce.h
+++ b/xen/arch/x86/cpu/mcheck/mce.h
@@ -122,6 +122,7 @@ struct mca_summary {
DECLARE_PER_CPU(struct mca_banks *, poll_bankmask);
DECLARE_PER_CPU(struct mca_banks *, no_cmci_banks);
+DECLARE_PER_CPU(struct mca_banks *, mce_clear_banks);
extern bool_t cmci_support;
extern bool_t is_mc_panic;
diff --git a/xen/arch/x86/cpu/mcheck/mce_intel.c b/xen/arch/x86/cpu/mcheck/mce_intel.c
index e70bd55b1d..d80f692496 100644
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -21,9 +21,7 @@
#include "vmce.h"
#include "mcaction.h"
-DEFINE_PER_CPU(struct mca_banks *, mce_banks_owned);
-DEFINE_PER_CPU(struct mca_banks *, no_cmci_banks);
-DEFINE_PER_CPU(struct mca_banks *, mce_clear_banks);
+static DEFINE_PER_CPU_READ_MOSTLY(struct mca_banks *, mce_banks_owned);
bool_t __read_mostly cmci_support = 0;
static bool_t __read_mostly ser_support = 0;
static bool_t __read_mostly mce_force_broadcast;
@@ -789,36 +787,28 @@ static void intel_init_mce(void)
static void cpu_mcabank_free(unsigned int cpu)
{
- struct mca_banks *mb1, *mb2, *mb3;
+ struct mca_banks *cmci = per_cpu(no_cmci_banks, cpu);
+ struct mca_banks *owned = per_cpu(mce_banks_owned, cpu);
- mb1 = per_cpu(mce_clear_banks, cpu);
- mb2 = per_cpu(no_cmci_banks, cpu);
- mb3 = per_cpu(mce_banks_owned, cpu);
-
- mcabanks_free(mb1);
- mcabanks_free(mb2);
- mcabanks_free(mb3);
+ mcabanks_free(cmci);
+ mcabanks_free(owned);
}
static int cpu_mcabank_alloc(unsigned int cpu)
{
- struct mca_banks *mb1, *mb2, *mb3;
+ struct mca_banks *cmci = mcabanks_alloc();
+ struct mca_banks *owned = mcabanks_alloc();
- mb1 = mcabanks_alloc();
- mb2 = mcabanks_alloc();
- mb3 = mcabanks_alloc();
- if (!mb1 || !mb2 || !mb3)
+ if (!cmci || !owned)
goto out;
- per_cpu(mce_clear_banks, cpu) = mb1;
- per_cpu(no_cmci_banks, cpu) = mb2;
- per_cpu(mce_banks_owned, cpu) = mb3;
+ per_cpu(no_cmci_banks, cpu) = cmci;
+ per_cpu(mce_banks_owned, cpu) = owned;
return 0;
out:
- mcabanks_free(mb1);
- mcabanks_free(mb2);
- mcabanks_free(mb3);
+ mcabanks_free(cmci);
+ mcabanks_free(owned);
return -ENOMEM;
}
diff --git a/xen/arch/x86/cpu/mcheck/mctelem.h b/xen/arch/x86/cpu/mcheck/mctelem.h
index 04edf98656..e5514d9891 100644
--- a/xen/arch/x86/cpu/mcheck/mctelem.h
+++ b/xen/arch/x86/cpu/mcheck/mctelem.h
@@ -23,7 +23,7 @@
* urgent uses, intended for use from machine check exception handlers,
* and non-urgent uses intended for use from error pollers.
* Associated with each logout entry of whatever class is a data area
- * sized per the single argument to mctelem_init. mcelem_init should be
+ * sized per the single argument to mctelem_init. mctelem_init should be
* called from MCA init code before anybody has the chance to change the
* machine check vector with mcheck_mca_logout or to use mcheck_mca_logout.
*
@@ -45,7 +45,7 @@
* which will return a cookie referencing the oldest (first committed)
* entry of the requested class. Access the associated data using
* mctelem_dataptr and when finished use mctelem_consume_oldest_end - in the
- * begin .. end bracket you are guaranteed that the entry canot be freed
+ * begin .. end bracket you are guaranteed that the entry can't be freed
* even if it is ack'd elsewhere). Once the ultimate consumer of the
* telemetry has processed it to stable storage it should acknowledge
* the telemetry quoting the cookie id, at which point we will free
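
The comment block above describes the consumer side of this API; the sketch
below walks through that begin / dataptr / end / ack sequence. It assumes the
prototypes declared elsewhere in mctelem.h (mctelem_consume_oldest_begin(),
mctelem_dataptr(), mctelem_consume_oldest_end(), mctelem_ack()), that a NULL
cookie means no entry is pending, and the MC_NONURGENT class; process_record()
is a purely hypothetical consumer hook.

#include "mctelem.h"

/* Hypothetical hook: push one telemetry record to stable storage. */
static void process_record(const void *data)
{
    (void)data;   /* e.g. copy into a dom0-visible buffer */
}

/* Drain all committed non-urgent telemetry, oldest entry first. */
static void drain_nonurgent_telemetry(void)
{
    mctelem_cookie_t cookie;

    while ( (cookie = mctelem_consume_oldest_begin(MC_NONURGENT)) != NULL )
    {
        /*
         * Inside the begin .. end bracket the entry cannot be freed,
         * even if it is acknowledged elsewhere.
         */
        process_record(mctelem_dataptr(cookie));

        mctelem_consume_oldest_end(cookie);

        /* Processed to stable storage: acknowledge so it can be freed. */
        mctelem_ack(MC_NONURGENT, cookie);
    }
}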
diff --git a/xen/arch/x86/cpu/mcheck/non-fatal.c b/xen/arch/x86/cpu/mcheck/non-fatal.c
index 16fbae6827..2e79fbdc1c 100644
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c
@@ -23,7 +23,6 @@
#include "mce.h"
#include "vmce.h"
-DEFINE_PER_CPU(struct mca_banks *, poll_bankmask);
static struct timer mce_timer;
#define MCE_PERIOD MILLISECS(8000)