92cf211874
sched/preempt: Merge preempt_mask.h into preempt.h

preempt_mask.h defines all the preempt_count semantics and related
symbols: preempt, softirq, hardirq, nmi, preempt active, need resched,
etc. preempt.h defines the accessors and mutators of preempt_count.

But there is a messy dependency game around those two header files:

* preempt_mask.h includes preempt.h in order to access preempt_count().

* preempt_mask.h defines all preempt_count semantics and symbols except
  PREEMPT_NEED_RESCHED, which is needed by asm/preempt.h. Thus we need to
  define it from preempt.h, right before including asm/preempt.h, instead
  of defining it in preempt_mask.h with the other preempt_count symbols.
  As a result, the preempt_count semantics end up spread out.

* We plan to introduce preempt_active_[enter,exit]() to consolidate the
  preempt_schedule*() code. But we'll need access to both the
  preempt_count mutators (preempt_count_add()) and the preempt_count
  symbols (PREEMPT_ACTIVE, PREEMPT_OFFSET). The usual place to define
  preempt operations is preempt.h, but then we'd need symbols from
  preempt_mask.h, which already includes preempt.h. So we end up with a
  circular dependency.

Let's merge preempt_mask.h into preempt.h to solve these dependency
issues. This way we gather the semantic symbols and the operation
definitions of preempt_count in a single file.

This is a dumb copy-paste merge. Further merge re-arrangements are
performed in a subsequent patch to ease review.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1431441711-29753-2-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
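The third bullet above is the key motivation: with PREEMPT_ACTIVE, PREEMPT_OFFSET and the preempt_count mutators gathered in one header, helpers such as preempt_active_[enter,exit]() can be defined without a circular include. The following is only a rough sketch of what such helpers could look like, built from the symbols quoted in the message; it is not the actual follow-up patch, and preempt_count_sub()/barrier() are assumed to be available as the usual counterparts of preempt_count_add().

/*
 * Sketch only: once PREEMPT_ACTIVE, PREEMPT_OFFSET and the preempt_count
 * mutators all live in <linux/preempt.h>, helpers like these can be
 * defined there without any header dependency cycle.
 */
#define preempt_active_enter() \
do { \
	preempt_count_add(PREEMPT_ACTIVE + PREEMPT_OFFSET); \
	barrier(); \
} while (0)

#define preempt_active_exit() \
do { \
	barrier(); \
	preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_OFFSET); \
} while (0)

preempt_schedule*() would then bracket its call into the scheduler with these two helpers, which is the consolidation the message refers to.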
80 lines | 1.6 KiB | C
#ifndef _M68K_IRQFLAGS_H
#define _M68K_IRQFLAGS_H

#include <linux/types.h>
#include <linux/preempt.h>
#include <asm/thread_info.h>
#include <asm/entry.h>

/* Return the current status register; its IPL field is the IRQ mask. */
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;
	asm volatile ("movew %%sr,%0" : "=d" (flags) : : "memory");
	return flags;
}

/* Disable interrupts by raising the IPL field in %sr to 7. */
static inline void arch_local_irq_disable(void)
{
#ifdef CONFIG_COLDFIRE
	asm volatile (
		"move %/sr,%%d0 \n\t"
		"ori.l #0x0700,%%d0 \n\t"
		"move %%d0,%/sr \n"
		: /* no outputs */
		:
		: "cc", "%d0", "memory");
#else
	asm volatile ("oriw #0x0700,%%sr" : : : "memory");
#endif
}

static inline void arch_local_irq_enable(void)
{
#if defined(CONFIG_COLDFIRE)
	asm volatile (
		"move %/sr,%%d0 \n\t"
		"andi.l #0xf8ff,%%d0 \n\t"
		"move %%d0,%/sr \n"
		: /* no outputs */
		:
		: "cc", "%d0", "memory");
#else
# if defined(CONFIG_MMU)
	/* Only re-enable interrupts outside hard interrupt context (Q40 excepted). */
	if (MACH_IS_Q40 || !hardirq_count())
# endif
		asm volatile (
			"andiw %0,%%sr"
			:
			: "i" (ALLOWINT)
			: "memory");
#endif
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	asm volatile ("movew %0,%%sr" : : "d" (flags) : "memory");
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	if (MACH_IS_ATARI) {
		/* Ignore HSYNC = ipl 2 on Atari */
		return (flags & ~(ALLOWINT | 0x200)) != 0;
	}
	return (flags & ~ALLOWINT) != 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#endif /* _M68K_IRQFLAGS_H */
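For context, here is a short hypothetical usage sketch of the primitives defined above, as a caller would normally reach them through the generic local_irq_save()/local_irq_restore() wrappers; the function name update_shared_state is made up for illustration.

/* Hypothetical example, not part of the header above. */
static void update_shared_state(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* save %sr and raise the IPL */
	/* ... modify state that an interrupt handler also touches ... */
	arch_local_irq_restore(flags);	/* put the saved %sr back */
}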