The SMP version of arch_local_irq_enable() uses preempt_disable(), but
<asm/irqflags.h> doesn't include <linux/preempt.h>, causing the
following errors on SMP when pstore/ftrace is enabled (caught by
buildbot smp allyesconfig):
  In file included from include/linux/irqflags.h:15,
                   from fs/pstore/ftrace.c:16:
  arch/metag/include/asm/irqflags.h: In function 'arch_local_irq_enable':
  arch/metag/include/asm/irqflags.h:84: error: implicit declaration of function 'preempt_disable'
  arch/metag/include/asm/irqflags.h:86: error: implicit declaration of function 'preempt_enable_no_resched'
However <linux/preempt.h> cannot be easily included from
<asm/irqflags.h>, as it can cause circular include dependencies in the
!SMP case, and potentially in the SMP case in the future. Therefore move
the SMP implementation of arch_local_irq_enable() into traps.c and use
an inline version of get_trigger_mask(), which is also defined in
traps.c for SMP.
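
For reference, the kind of cycle being avoided looks roughly like this
(illustrative only; the exact chain depends on architecture and
configuration, so treat the intermediate headers as hypothetical):

  <linux/irqflags.h>
    -> <asm/irqflags.h>
      -> <linux/preempt.h>           /* the include we cannot safely add */
        -> <linux/thread_info.h>
          -> <asm/thread_info.h>
            -> ... -> <linux/irqflags.h>   /* back where we started */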
This adds an extra layer of function call / stack push when
preempt_disable() needs to call other functions; however, in the
non-preemptive SMP case it should be about as fast, since it was
already calling the get_trigger_mask() function, which is now inlined.
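
For reference, in the non-preemptive case (!CONFIG_PREEMPT_COUNT)
<linux/preempt.h> reduces both calls to plain compiler barriers
(paraphrased):

  #define preempt_disable()		barrier()
  #define preempt_enable_no_resched()	barrier()

so the only overhead added on such configurations is the out-of-line
call itself.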
Signed-off-by: James Hogan <[email protected]>
--- a/arch/metag/include/asm/irqflags.h
+++ b/arch/metag/include/asm/irqflags.h
@@ ... @@
 	asm volatile("MOV TXMASKI,%0\n" : : "r" (flags) : "memory");
 }
-static inline void arch_local_irq_enable(void)
-{
 #ifdef CONFIG_SMP
-	preempt_disable();
-	arch_local_irq_restore(get_trigger_mask());
-	preempt_enable_no_resched();
+/* Avoid circular include dependencies through <linux/preempt.h> */
+void arch_local_irq_enable(void);
 #else
+static inline void arch_local_irq_enable(void)
+{
 	arch_local_irq_restore(get_trigger_mask());
-#endif
 }
+#endif
 #endif /* (__ASSEMBLY__) */
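
For clarity, the relevant section of <asm/irqflags.h> after this hunk
is applied reads:

  #ifdef CONFIG_SMP
  /* Avoid circular include dependencies through <linux/preempt.h> */
  void arch_local_irq_enable(void);
  #else
  static inline void arch_local_irq_enable(void)
  {
  	arch_local_irq_restore(get_trigger_mask());
  }
  #endif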
--- a/arch/metag/kernel/traps.c
+++ b/arch/metag/kernel/traps.c
@@ ... @@
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/preempt.h>
 #include <linux/ptrace.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
@@ ... @@
 #endif
 #ifdef CONFIG_SMP
-unsigned int get_trigger_mask(void)
+static inline unsigned int _get_trigger_mask(void)
 {
 	unsigned long cpu = smp_processor_id();
 	return per_cpu(trigger_mask, cpu);
 }
+
+unsigned int get_trigger_mask(void)
+{
+	return _get_trigger_mask();
+}
+
 static void set_trigger_mask(unsigned int mask)
 {
 	unsigned long cpu = smp_processor_id();
 	per_cpu(trigger_mask, cpu) = mask;
 }
+
+void arch_local_irq_enable(void)
+{
+	preempt_disable();
+	arch_local_irq_restore(_get_trigger_mask());
+	preempt_enable_no_resched();
+}
+EXPORT_SYMBOL(arch_local_irq_enable);
 #else
 static void set_trigger_mask(unsigned int mask)
 {
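
For context, generic code reaches the new out-of-line definition
through the usual wrappers in <linux/irqflags.h> (simplified; the
CONFIG_TRACE_IRQFLAGS variants are omitted):

  #define raw_local_irq_enable()	arch_local_irq_enable()
  #define local_irq_enable()	do { raw_local_irq_enable(); } while (0)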