When a machine check or NMI occurs while multi-byte code is being
patched, the CPU could theoretically see a partially updated
instruction and crash.  Prevent this by temporarily disabling MCEs
and by making the NMI handler return early while patching is in
progress.
Based on discussion with Mathieu Desnoyers.
Cc: Mathieu Desnoyers <[email protected]>
Cc: Jeremy Fitzhardinge <[email protected]>
Signed-off-by: Andi Kleen <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
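
For reference, the intended flow in alternative_instructions() after this
patch is roughly the following.  This is an abridged sketch assembled from
the hunks below; the comments are illustrative and not part of the diff:

        stop_nmi();                 /* do_nmi() skips handling from now on */
#ifdef CONFIG_X86_MCE
        stop_mce();                 /* clear CR4.MCE: no machine-check
                                       exception can hit the patch window */
#endif

        local_irq_save(flags);
        apply_alternatives(__alt_instructions, __alt_instructions_end);
        /* ... SMP alternatives and other patching elided ... */
        apply_paravirt(__parainstructions, __parainstructions_end);
        local_irq_restore(flags);

        restart_nmi();              /* NMIs are handled normally again */
#ifdef CONFIG_X86_MCE
        restart_mce();              /* set CR4.MCE again only if it was set */
#endif

stop_nmi() both disables the NMI watchdog source (acpi_nmi_disable()) and
sets ignore_nmis, so an NMI that still arrives is counted but otherwise
ignored; stop_mce() saves CR4 and clears the MCE bit, and restart_mce()
re-enables it only if it was enabled to begin with.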
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
+#include <asm/mce.h>
+#include <asm/nmi.h>
#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;
{
unsigned long flags;
+ /* The patching is not fully atomic, so try to avoid local NMIs and
+    machine checks that might execute the code that is being patched.
+    Other CPUs are not running at this point. */
+ stop_nmi();
+#ifdef CONFIG_X86_MCE
+ stop_mce();
+#endif
+
local_irq_save(flags);
apply_alternatives(__alt_instructions, __alt_instructions_end);
#endif
apply_paravirt(__parainstructions, __parainstructions_end);
local_irq_restore(flags);
+
+ restart_nmi();
+#ifdef CONFIG_X86_MCE
+ restart_mce();
+#endif
}
/*
}
}
+static unsigned long old_cr4 __initdata;
+
+void __init stop_mce(void)
+{
+ old_cr4 = read_cr4();
+ clear_in_cr4(X86_CR4_MCE);
+}
+
+void __init restart_mce(void)
+{
+ if (old_cr4 & X86_CR4_MCE)
+ set_in_cr4(X86_CR4_MCE);
+}
+
static int __init mcheck_disable(char *str)
{
mce_disabled = 1;
reassert_nmi();
}
+static int ignore_nmis;
+
fastcall __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
int cpu;
++nmi_count(cpu);
- default_do_nmi(regs);
+ if (!ignore_nmis)
+     default_do_nmi(regs);
nmi_exit();
}
+void stop_nmi(void)
+{
+ acpi_nmi_disable();
+ ignore_nmis++;
+}
+
+void restart_nmi(void)
+{
+ ignore_nmis--;
+ acpi_nmi_enable();
+}
+
#ifdef CONFIG_KPROBES
fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
&mce_chrdev_ops,
};
+static unsigned long old_cr4 __initdata;
+
+void __init stop_mce(void)
+{
+ old_cr4 = read_cr4();
+ clear_in_cr4(X86_CR4_MCE);
+}
+
+void __init restart_mce(void)
+{
+ if (old_cr4 & X86_CR4_MCE)
+ set_in_cr4(X86_CR4_MCE);
+}
+
/*
* Old style boot options parsing. Only for compatibility.
*/
return rc;
}
+static unsigned ignore_nmis;
+
asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
nmi_enter();
add_pda(__nmi_count,1);
- default_do_nmi(regs);
+ if (!ignore_nmis)
+     default_do_nmi(regs);
nmi_exit();
}
return 0;
}
+void stop_nmi(void)
+{
+ acpi_nmi_disable();
+ ignore_nmis++;
+}
+
+void restart_nmi(void)
+{
+ ignore_nmis--;
+ acpi_nmi_enable();
+}
+
#ifdef CONFIG_SYSCTL
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
#endif
extern int mce_disabled;
+
+extern void stop_mce(void);
+extern void restart_mce(void);
+
int lapic_watchdog_ok(void);
void disable_lapic_nmi_watchdog(void);
void enable_lapic_nmi_watchdog(void);
+void stop_nmi(void);
+void restart_nmi(void);
#endif /* ASM_NMI_H */
extern int mce_notify_user(void);
+extern void stop_mce(void);
+extern void restart_mce(void);
+
#endif
#endif
int lapic_watchdog_ok(void);
void disable_lapic_nmi_watchdog(void);
void enable_lapic_nmi_watchdog(void);
+void stop_nmi(void);
+void restart_nmi(void);
#endif /* ASM_NMI_H */