Separate the Intel non-fatal MCE initialization code from the generic MCE code,
the same way it is already done for the AMD code. This prepares for later making
the Intel/AMD MCE code optional in the build.

Also clean up unused includes.

No functional change intended.

Signed-off-by: Sergiy Kibrik <[email protected]>
---
 xen/arch/x86/cpu/mcheck/Makefile         |  1 +
 xen/arch/x86/cpu/mcheck/intel_nonfatal.c | 83 ++++++++++++++++++++++++
 xen/arch/x86/cpu/mcheck/mce.h            |  1 +
 xen/arch/x86/cpu/mcheck/non-fatal.c      | 82 +----------------------
 4 files changed, 86 insertions(+), 81 deletions(-)
 create mode 100644 xen/arch/x86/cpu/mcheck/intel_nonfatal.c

diff --git a/xen/arch/x86/cpu/mcheck/Makefile b/xen/arch/x86/cpu/mcheck/Makefile
index 0d63ff9096..bb9908dede 100644
--- a/xen/arch/x86/cpu/mcheck/Makefile
+++ b/xen/arch/x86/cpu/mcheck/Makefile
@@ -2,6 +2,7 @@ obj-y += amd_nonfatal.o
 obj-y += mce_amd.o
 obj-y += mcaction.o
 obj-y += barrier.o
+obj-y += intel_nonfatal.o
 obj-y += mctelem.o
 obj-y += mce.o
 obj-y += mce-apei.o
diff --git a/xen/arch/x86/cpu/mcheck/intel_nonfatal.c b/xen/arch/x86/cpu/mcheck/intel_nonfatal.c
new file mode 100644
index 0000000000..0085948dba
--- /dev/null
+++ b/xen/arch/x86/cpu/mcheck/intel_nonfatal.c
@@ -0,0 +1,83 @@
+/*
+ * Non Fatal Machine Check Exception Reporting
+ *
+ * (C) Copyright 2002 Dave Jones. <[email protected]>
+ *
+ * This file contains routines to check for non-fatal MCEs every 15s
+ *
+ */
+
+#include <xen/event.h>
+
+#include "mce.h"
+#include "vmce.h"
+
+static struct timer mce_timer;
+
+#define MCE_PERIOD MILLISECS(8000)
+#define MCE_PERIOD_MIN MILLISECS(2000)
+#define MCE_PERIOD_MAX MILLISECS(16000)
+
+static uint64_t period = MCE_PERIOD;
+static int adjust = 0;
+static int variable_period = 1;
+
+static void cf_check mce_checkregs(void *info)
+{
+       mctelem_cookie_t mctc;
+       struct mca_summary bs;
+       static uint64_t dumpcount = 0;
+
+       mctc = mcheck_mca_logout(MCA_POLLER, this_cpu(poll_bankmask),
+                                &bs, NULL);
+
+       if (bs.errcnt && mctc != NULL) {
+               adjust++;
+
+               /* If Dom0 enabled the VIRQ_MCA event, then notify it.
+                * Otherwise, if dom0 has had plenty of time to register
+                * the virq handler but still hasn't then dump telemetry
+                * to the Xen console.  The call count may be incremented
+                * on multiple cpus at once and is indicative only - just
+                * a simple-minded attempt to avoid spamming the console
+                * for corrected errors in early startup.
+                */
+
+               if (dom0_vmce_enabled()) {
+                       mctelem_commit(mctc);
+                       send_global_virq(VIRQ_MCA);
+               } else if (++dumpcount >= 10) {
+                       x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc));
+                       mctelem_dismiss(mctc);
+               } else {
+                       mctelem_dismiss(mctc);
+               }
+       } else if (mctc != NULL) {
+               mctelem_dismiss(mctc);
+       }
+}
+
+static void cf_check mce_work_fn(void *data)
+{
+       on_each_cpu(mce_checkregs, NULL, 1);
+
+       if (variable_period) {
+               if (adjust)
+                       period /= (adjust + 1);
+               else
+                       period *= 2;
+               if (period > MCE_PERIOD_MAX)
+                       period = MCE_PERIOD_MAX;
+               if (period < MCE_PERIOD_MIN)
+                       period = MCE_PERIOD_MIN;
+       }
+
+       set_timer(&mce_timer, NOW() + period);
+       adjust = 0;
+}
+
+void __init intel_nonfatal_mcheck_init(struct cpuinfo_x86 *unused)
+{
+       init_timer(&mce_timer, mce_work_fn, NULL, 0);
+       set_timer(&mce_timer, NOW() + MCE_PERIOD);
+}
diff --git a/xen/arch/x86/cpu/mcheck/mce.h b/xen/arch/x86/cpu/mcheck/mce.h
index 7f26afae23..4806405f96 100644
--- a/xen/arch/x86/cpu/mcheck/mce.h
+++ b/xen/arch/x86/cpu/mcheck/mce.h
@@ -47,6 +47,7 @@ enum mcheck_type amd_mcheck_init(const struct cpuinfo_x86 *c, bool bsp);
 enum mcheck_type intel_mcheck_init(struct cpuinfo_x86 *c, bool bsp);
 
 void amd_nonfatal_mcheck_init(struct cpuinfo_x86 *c);
+void intel_nonfatal_mcheck_init(struct cpuinfo_x86 *c);
 
 extern unsigned int firstbank;
 extern unsigned int ppin_msr;
diff --git a/xen/arch/x86/cpu/mcheck/non-fatal.c b/xen/arch/x86/cpu/mcheck/non-fatal.c
index 1c0c32ba08..33cacd15c2 100644
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c
@@ -7,84 +7,7 @@
  *
  */
 
-#include <xen/init.h>
-#include <xen/types.h>
-#include <xen/kernel.h>
-#include <xen/smp.h>
-#include <xen/timer.h>
-#include <xen/errno.h>
-#include <xen/event.h>
-#include <xen/sched.h>
-#include <asm/processor.h> 
-#include <asm/system.h>
-#include <asm/msr.h>
-
 #include "mce.h"
-#include "vmce.h"
-
-static struct timer mce_timer;
-
-#define MCE_PERIOD MILLISECS(8000)
-#define MCE_PERIOD_MIN MILLISECS(2000)
-#define MCE_PERIOD_MAX MILLISECS(16000)
-
-static uint64_t period = MCE_PERIOD;
-static int adjust = 0;
-static int variable_period = 1;
-
-static void cf_check mce_checkregs(void *info)
-{
-       mctelem_cookie_t mctc;
-       struct mca_summary bs;
-       static uint64_t dumpcount = 0;
-
-       mctc = mcheck_mca_logout(MCA_POLLER, this_cpu(poll_bankmask),
-                                &bs, NULL);
-
-       if (bs.errcnt && mctc != NULL) {
-               adjust++;
-
-               /* If Dom0 enabled the VIRQ_MCA event, then notify it.
-                * Otherwise, if dom0 has had plenty of time to register
-                * the virq handler but still hasn't then dump telemetry
-                * to the Xen console.  The call count may be incremented
-                * on multiple cpus at once and is indicative only - just
-                * a simple-minded attempt to avoid spamming the console
-                * for corrected errors in early startup.
-                */
-
-               if (dom0_vmce_enabled()) {
-                       mctelem_commit(mctc);
-                       send_global_virq(VIRQ_MCA);
-               } else if (++dumpcount >= 10) {
-                       x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc));
-                       mctelem_dismiss(mctc);
-               } else {
-                       mctelem_dismiss(mctc);
-               }
-       } else if (mctc != NULL) {
-               mctelem_dismiss(mctc);
-       }
-}
-
-static void cf_check mce_work_fn(void *data)
-{ 
-       on_each_cpu(mce_checkregs, NULL, 1);
-
-       if (variable_period) {
-               if (adjust)
-                       period /= (adjust + 1);
-               else
-                       period *= 2;
-               if (period > MCE_PERIOD_MAX)
-                       period = MCE_PERIOD_MAX;
-               if (period < MCE_PERIOD_MIN)
-                       period = MCE_PERIOD_MIN;
-       }
-
-       set_timer(&mce_timer, NOW() + period);
-       adjust = 0;
-}
 
 static int __init cf_check init_nonfatal_mce_checker(void)
 {
@@ -106,13 +29,10 @@ static int __init cf_check init_nonfatal_mce_checker(void)
                /* Assume we are on K8 or newer AMD or Hygon CPU here */
                amd_nonfatal_mcheck_init(c);
                break;
-
        case X86_VENDOR_INTEL:
-               init_timer(&mce_timer, mce_work_fn, NULL, 0);
-               set_timer(&mce_timer, NOW() + MCE_PERIOD);
+               intel_nonfatal_mcheck_init(c);
                break;
        }
-
        printk(KERN_INFO "mcheck_poll: Machine check polling timer started.\n");
        return 0;
 }
-- 
2.25.1


Reply via email to